A demo of measuring code execution time in the Linux kernel

Using the overhead of spinlock operations as the example.
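
The idea is straightforward: read a clock immediately before and after the operation under test, subtract the two readings, and accumulate the delta; every N calls, print the running average and reset. In the patch below the counters and accumulators are plain statics defined in a header, so every translation unit gets its own copy and the updates are unsynchronized; that is racy, but good enough for a rough estimate. A minimal sketch of the pattern (measure_ns is a hypothetical helper, not part of the patch):

#include <linux/time64.h>
#include <linux/timekeeping.h>

/* Time one invocation of fn() and return the elapsed nanoseconds. */
static s64 measure_ns(void (*fn)(void))
{
	struct timespec64 start, end, delta;

	ktime_get_boottime_ts64(&start);
	fn();				/* code under test */
	ktime_get_boottime_ts64(&end);

	delta = timespec64_sub(end, start);
	return timespec64_to_ns(&delta);
}

The patch applies this pattern inside the spin_lock()/spin_unlock() wrappers in include/linux/spinlock.h: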

diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -59,7 +59,23 @@
 #include <linux/lockdep.h>
 #include <asm/barrier.h>
 #include <asm/mmiowb.h>
+#include <linux/timer.h>
+#include <linux/timex.h>
+#include <linux/time64.h>
 
+static atomic_t spin_lock_count = ATOMIC_INIT(0);
+static atomic_t spin_unlock_count = ATOMIC_INIT(0);
+
+static atomic_t spin_lock_irq_count = ATOMIC_INIT(0);
+static atomic_t spin_unlock_irq_count = ATOMIC_INIT(0);
+
+static long long int spin_lock_time = 0;
+static long long int spin_unlock_time = 0;
+
+static long long int spin_lock_irq_time = 0;
+static long long int spin_unlock_irq_time = 0;
+
+#define PRINT_FREQ 10	/* print the running average every 10 calls */
 
 /*
  * Must define these before including other files, inline functions need them
@@ -351,7 +367,22 @@ do {						\
 
 static __always_inline void spin_lock(spinlock_t *lock)
 {
+	struct timespec64 ts_start;
+	struct timespec64 ts_end;
+	struct timespec64 ts_delta;
+	
+	atomic_inc(&spin_lock_count);
+	ktime_get_boottime_ts64(&ts_start);
 	raw_spin_lock(&lock->rlock);
+	ktime_get_boottime_ts64(&ts_end);
+	
+	ts_delta = timespec64_sub(ts_end, ts_start);
+	spin_lock_time += timespec64_to_ns(&ts_delta);
+	if ((atomic_read(&spin_lock_count) % PRINT_FREQ) == 0) {
+		pr_err("%s: avg time consumed: %lld ns\n", __func__, spin_lock_time / PRINT_FREQ);
+		spin_lock_time = 0;
+		atomic_set(&spin_lock_count, 0);
+	}
 }
 
 static __always_inline void spin_lock_bh(spinlock_t *lock)
@@ -376,7 +407,22 @@ do {									\
 
 static __always_inline void spin_lock_irq(spinlock_t *lock)
 {
+	struct timespec64 ts_start;
+	struct timespec64 ts_end;
+	struct timespec64 ts_delta;
+	
+	atomic_inc(&spin_lock_irq_count);
+	ktime_get_boottime_ts64(&ts_start);
 	raw_spin_lock_irq(&lock->rlock);
+	ktime_get_boottime_ts64(&ts_end);
+	
+	ts_delta = timespec64_sub(ts_end, ts_start);
+	spin_lock_irq_time += timespec64_to_ns(&ts_delta);
+	if ((atomic_read(&spin_lock_irq_count) % PRINT_FREQ) == 0) {
+		pr_err("%s: avg time consumed: %lld ns\n", __func__, spin_lock_irq_time / PRINT_FREQ);
+		spin_lock_irq_time = 0;
+		atomic_set(&spin_lock_irq_count, 0);
+	}
 }
 
 #define spin_lock_irqsave(lock, flags)				\
@@ -391,7 +437,22 @@ do {									\
 
 static __always_inline void spin_unlock(spinlock_t *lock)
 {
+	struct timespec64 ts_start;
+	struct timespec64 ts_end;
+	struct timespec64 ts_delta;
+	
+	atomic_inc(&spin_unlock_count);
+	ktime_get_boottime_ts64(&ts_start);
 	raw_spin_unlock(&lock->rlock);
+	ktime_get_boottime_ts64(&ts_end);
+	
+	ts_delta = timespec64_sub(ts_end, ts_start);
+	spin_unlock_time += timespec64_to_ns(&ts_delta);
+	if ((atomic_read(&spin_unlock_count) % PRINT_FREQ) == 0) {
+		pr_err("%s: avg time consumed: %lld ns\n", __func__, spin_unlock_time / PRINT_FREQ);
+		spin_unlock_time = 0;
+		atomic_set(&spin_unlock_count, 0);
+	}
 }
 
 static __always_inline void spin_unlock_bh(spinlock_t *lock)
@@ -401,7 +462,22 @@ static __always_inline void spin_unlock_bh(spinlock_t *lock)
 
 static __always_inline void spin_unlock_irq(spinlock_t *lock)
 {
+	struct timespec64 ts_start;
+	struct timespec64 ts_end;
+	struct timespec64 ts_delta;
+	
+	atomic_inc(&spin_unlock_irq_count);
+	ktime_get_boottime_ts64(&ts_start);
 	raw_spin_unlock_irq(&lock->rlock);
+	ktime_get_boottime_ts64(&ts_end);
+	
+	ts_delta = timespec64_sub(ts_end, ts_start);
+	spin_unlock_irq_time += timespec64_to_ns(&ts_delta);
+	if ((atomic_read(&spin_unlock_irq_count) % PRINT_FREQ) == 0) {
+		pr_err("%s: avg time consumed: %lld ns\n", __func__, spin_unlock_irq_time / PRINT_FREQ);
+		spin_unlock_irq_time = 0;
+		atomic_set(&spin_unlock_irq_count, 0);
+	}
 }
 
 static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
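
A small test file drives the instrumented paths; it is wired into the build through kernel/locking/Makefile (obj-y links it straight into the kernel image):
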
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 21fc60b1d2f7..1b94ba958cec 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -24,6 +24,7 @@ endif
 obj-$(CONFIG_SMP) += spinlock.o
 obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
+obj-y += test_spinlock.o
 obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
 obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o

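The test file itself, kernel/locking/test_spinlock.c, arms a pinned high-resolution timer whose callback takes and releases a private spinlock once per expiry, exercising the instrumented wrappers above:
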
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/spinlock.h>

/* Timer period in nanoseconds; the ULL suffix avoids 32-bit constant overflow. */
#define HRTIMER0_NS (1000000ULL * 1000 * 1000)

static struct hrtimer g_hrtimer0;
static DEFINE_SPINLOCK(test_spinlock);

static enum hrtimer_restart hrtimer_test_fn0(struct hrtimer *hrtimer)
{
	pr_err("#### v1 hrtimer timeout: %llu(ms)\n", HRTIMER0_NS / 1000 / 1000);

	/* Exercise the instrumented lock/unlock paths once per expiry. */
	spin_lock(&test_spinlock);
	spin_unlock(&test_spinlock);

	spin_lock_irq(&test_spinlock);
	spin_unlock_irq(&test_spinlock);

	hrtimer_forward_now(hrtimer, ns_to_ktime(HRTIMER0_NS));
	return HRTIMER_RESTART;
}

static void init_hrtimer(void)
{
	struct hrtimer *hrtimer0 = &g_hrtimer0;
	int bound_on;

	hrtimer_init(hrtimer0, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	hrtimer0->function = hrtimer_test_fn0;
	/* The start mode must keep the _HARD bit used at init time. */
	hrtimer_start(hrtimer0, ns_to_ktime(HRTIMER0_NS),
			HRTIMER_MODE_REL_PINNED_HARD);
	bound_on = hrtimer0->base->cpu_base->cpu;
	pr_err("bound_on:%d\n", bound_on);
}

static int __init test_spinlock_init(void)
{
	pr_err("#### hr_timer_test module init...\n");
	init_hrtimer();
	return 0;
}

static void __exit test_spinlock_exit(void)
{
	hrtimer_cancel(&g_hrtimer0);
	pr_err("#### hr_timer_test module exit...\n");
}

module_init(test_spinlock_init);
module_exit(test_spinlock_exit);
MODULE_LICENSE("GPL");