Commit 96a2c464de07d7c72988db851c029b204fc59108

Authored by Frederic Weisbecker
1 parent 0efb4d2072

tracing/bkl: Add bkl ftrace events

Add two events, lock_kernel() and unlock_kernel(), to trace uses of the BKL.
This opens the door for userspace tools to perform statistics about
the callsites that use it, dependencies with other locks (by pairing
the trace with lock events), recursive uses and so on...

The {__reacquire,release}_kernel_lock() events are not traced because
these are called from schedule(); the sched events are therefore
sufficient to trace them.

Example of a trace:

hald-addon-stor-4152  [000]   165.875501: unlock_kernel: depth: 0, fs/block_dev.c:1358 __blkdev_put()
hald-addon-stor-4152  [000]   167.832974: lock_kernel: depth: 0, fs/block_dev.c:1167 __blkdev_get()

How to get the callsites that acquire it recursively:

cd /debug/tracing/events/bkl
echo "lock_depth > 0" > filter

firefox-4951  [001]   206.276967: unlock_kernel: depth: 1, fs/reiserfs/super.c:575 reiserfs_dirty_inode()

You can also filter by file and/or line.

v2: Use the FILTER_PTR_STRING attribute for the file and line fields
    so that they can be used in event filters.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Li Zefan <lizf@cn.fujitsu.com>

Showing 3 changed files with 82 additions and 9 deletions Side-by-side Diff

include/linux/smp_lock.h
... ... @@ -3,6 +3,7 @@
3 3  
4 4 #ifdef CONFIG_LOCK_KERNEL
5 5 #include <linux/sched.h>
  6 +#include <trace/events/bkl.h>
6 7  
7 8 #define kernel_locked() (current->lock_depth >= 0)
8 9  
9 10  
... ... @@ -24,9 +25,19 @@
24 25 return 0;
25 26 }
26 27  
27   -extern void __lockfunc lock_kernel(void) __acquires(kernel_lock);
28   -extern void __lockfunc unlock_kernel(void) __releases(kernel_lock);
  28 +extern void __lockfunc _lock_kernel(void) __acquires(kernel_lock);
  29 +extern void __lockfunc _unlock_kernel(void) __releases(kernel_lock);
29 30  
  31 +#define lock_kernel() { \
  32 + trace_lock_kernel(__func__, __FILE__, __LINE__); \
  33 + _lock_kernel(); \
  34 +}
  35 +
  36 +#define unlock_kernel() { \
  37 + trace_unlock_kernel(__func__, __FILE__, __LINE__); \
  38 + _unlock_kernel(); \
  39 +}
  40 +
30 41 /*
31 42 * Various legacy drivers don't really need the BKL in a specific
32 43 * function, but they *do* need to know that the BKL became available.
... ... @@ -41,8 +52,8 @@
41 52  
42 53 #else
43 54  
44   -#define lock_kernel() do { } while(0)
45   -#define unlock_kernel() do { } while(0)
  55 +#define lock_kernel() trace_lock_kernel(__func__, __FILE__, __LINE__);
  56 +#define unlock_kernel() trace_unlock_kernel(__func__, __FILE__, __LINE__);
46 57 #define release_kernel_lock(task) do { } while(0)
47 58 #define cycle_kernel_lock() do { } while(0)
48 59 #define reacquire_kernel_lock(task) 0
include/trace/events/bkl.h
  1 +#undef TRACE_SYSTEM
  2 +#define TRACE_SYSTEM bkl
  3 +
  4 +#if !defined(_TRACE_BKL_H) || defined(TRACE_HEADER_MULTI_READ)
  5 +#define _TRACE_BKL_H
  6 +
  7 +#include <linux/tracepoint.h>
  8 +
  9 +TRACE_EVENT(lock_kernel,
  10 +
  11 + TP_PROTO(const char *func, const char *file, int line),
  12 +
  13 + TP_ARGS(func, file, line),
  14 +
  15 + TP_STRUCT__entry(
  16 + __field( int, lock_depth )
  17 + __field_ext( const char *, func, FILTER_PTR_STRING )
  18 + __field_ext( const char *, file, FILTER_PTR_STRING )
  19 + __field( int, line )
  20 + ),
  21 +
  22 + TP_fast_assign(
  23 + /* We want to record the lock_depth after lock is acquired */
  24 + __entry->lock_depth = current->lock_depth + 1;
  25 + __entry->func = func;
  26 + __entry->file = file;
  27 + __entry->line = line;
  28 + ),
  29 +
  30 + TP_printk("depth: %d, %s:%d %s()", __entry->lock_depth,
  31 + __entry->file, __entry->line, __entry->func)
  32 +);
  33 +
  34 +TRACE_EVENT(unlock_kernel,
  35 +
  36 + TP_PROTO(const char *func, const char *file, int line),
  37 +
  38 + TP_ARGS(func, file, line),
  39 +
  40 + TP_STRUCT__entry(
  41 + __field(int, lock_depth)
  42 + __field(const char *, func)
  43 + __field(const char *, file)
  44 + __field(int, line)
  45 + ),
  46 +
  47 + TP_fast_assign(
  48 + __entry->lock_depth = current->lock_depth;
  49 + __entry->func = func;
  50 + __entry->file = file;
  51 + __entry->line = line;
  52 + ),
  53 +
  54 + TP_printk("depth: %d, %s:%d %s()", __entry->lock_depth,
  55 + __entry->file, __entry->line, __entry->func)
  56 +);
  57 +
  58 +#endif /* _TRACE_BKL_H */
  59 +
  60 +/* This part must be outside protection */
  61 +#include <trace/define_trace.h>
... ... @@ -5,10 +5,11 @@
5 5 * relegated to obsolescence, but used by various less
6 6 * important (or lazy) subsystems.
7 7 */
8   -#include <linux/smp_lock.h>
9 8 #include <linux/module.h>
10 9 #include <linux/kallsyms.h>
11 10 #include <linux/semaphore.h>
  11 +#define CREATE_TRACE_POINTS
  12 +#include <linux/smp_lock.h>
12 13  
13 14 /*
14 15 * The 'big kernel lock'
... ... @@ -113,7 +114,7 @@
113 114 * This cannot happen asynchronously, so we only need to
114 115 * worry about other CPU's.
115 116 */
116   -void __lockfunc lock_kernel(void)
  117 +void __lockfunc _lock_kernel(void)
117 118 {
118 119 int depth = current->lock_depth+1;
119 120 if (likely(!depth))
120 121  
... ... @@ -121,13 +122,13 @@
121 122 current->lock_depth = depth;
122 123 }
123 124  
124   -void __lockfunc unlock_kernel(void)
  125 +void __lockfunc _unlock_kernel(void)
125 126 {
126 127 BUG_ON(current->lock_depth < 0);
127 128 if (likely(--current->lock_depth < 0))
128 129 __unlock_kernel();
129 130 }
130 131  
131   -EXPORT_SYMBOL(lock_kernel);
132   -EXPORT_SYMBOL(unlock_kernel);
  132 +EXPORT_SYMBOL(_lock_kernel);
  133 +EXPORT_SYMBOL(_unlock_kernel);