Commit 28b1bd1cbc33cae95a309691d814399a69cf3070

Authored by Ingo Molnar

Merge branch 'core/locking' into tracing/ftrace

Showing 22 changed files

Documentation/lockdep-design.txt
... ... @@ -27,33 +27,37 @@
27 27 State
28 28 -----
29 29  
30   -The validator tracks lock-class usage history into 5 separate state bits:
  30 +The validator tracks lock-class usage history into 4n + 1 separate state bits:
31 31  
32   -- 'ever held in hardirq context' [ == hardirq-safe ]
33   -- 'ever held in softirq context' [ == softirq-safe ]
34   -- 'ever held with hardirqs enabled' [ == hardirq-unsafe ]
35   -- 'ever held with softirqs and hardirqs enabled' [ == softirq-unsafe ]
  32 +- 'ever held in STATE context'
  33 +- 'ever held as readlock in STATE context'
  34 +- 'ever held with STATE enabled'
  35 +- 'ever held as readlock with STATE enabled'
36 36  
  37 +Where STATE can be one of (kernel/lockdep_states.h):
  38 + - hardirq
  39 + - softirq
  40 + - reclaim_fs
  41 +
37 42 - 'ever used' [ == !unused ]
38 43  
39   -When locking rules are violated, these 4 state bits are presented in the
40   -locking error messages, inside curlies. A contrived example:
  44 +When locking rules are violated, these state bits are presented in the
  45 +locking error messages, inside curlies. A contrived example:
41 46  
42 47 modprobe/2287 is trying to acquire lock:
43   - (&sio_locks[i].lock){--..}, at: [<c02867fd>] mutex_lock+0x21/0x24
  48 + (&sio_locks[i].lock){-.-...}, at: [<c02867fd>] mutex_lock+0x21/0x24
44 49  
45 50 but task is already holding lock:
46   - (&sio_locks[i].lock){--..}, at: [<c02867fd>] mutex_lock+0x21/0x24
  51 + (&sio_locks[i].lock){-.-...}, at: [<c02867fd>] mutex_lock+0x21/0x24
47 52  
48 53  
49   -The bit position indicates hardirq, softirq, hardirq-read,
50   -softirq-read respectively, and the character displayed in each
51   -indicates:
  54 +The bit position indicates STATE, STATE-read, for each of the states listed
  55 +above, and the character displayed in each indicates:
52 56  
53 57 '.' acquired while irqs disabled
54 58 '+' acquired in irq context
55 59 '-' acquired with irqs enabled
56   - '?' read acquired in irq context with irqs enabled.
  60 + '?' acquired in irq context with irqs enabled.
57 61  
58 62 Unused mutexes cannot be part of the cause of an error.
59 63  
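A note on reading the wider annotation: with the three states in kernel/lockdep_states.h the curly-brace field now holds six characters instead of the old four. Following get_usage_chars() in kernel/lockdep.c (further down in this commit), the positions pair up per state, write bit first, read bit second:

    { - . - . . . }
      |   |   |
      |   |   +-- RECLAIM_FS (write, read)
      |   +------ SOFTIRQ    (write, read)
      +---------- HARDIRQ    (write, read)

Each character is decoded with the table above; in the contrived mutex_lock example only the hardirq and softirq write positions carry any usage, while the reclaim_fs pair is still '.'.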
include/linux/lockdep.h
... ... @@ -20,44 +20,11 @@
20 20 #include <linux/stacktrace.h>
21 21  
22 22 /*
23   - * Lock-class usage-state bits:
  23 + * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
  24 + * the total number of states... :-(
24 25 */
25   -enum lock_usage_bit
26   -{
27   - LOCK_USED = 0,
28   - LOCK_USED_IN_HARDIRQ,
29   - LOCK_USED_IN_SOFTIRQ,
30   - LOCK_ENABLED_SOFTIRQS,
31   - LOCK_ENABLED_HARDIRQS,
32   - LOCK_USED_IN_HARDIRQ_READ,
33   - LOCK_USED_IN_SOFTIRQ_READ,
34   - LOCK_ENABLED_SOFTIRQS_READ,
35   - LOCK_ENABLED_HARDIRQS_READ,
36   - LOCK_USAGE_STATES
37   -};
  26 +#define XXX_LOCK_USAGE_STATES (1+3*4)
38 27  
39   -/*
40   - * Usage-state bitmasks:
41   - */
42   -#define LOCKF_USED (1 << LOCK_USED)
43   -#define LOCKF_USED_IN_HARDIRQ (1 << LOCK_USED_IN_HARDIRQ)
44   -#define LOCKF_USED_IN_SOFTIRQ (1 << LOCK_USED_IN_SOFTIRQ)
45   -#define LOCKF_ENABLED_HARDIRQS (1 << LOCK_ENABLED_HARDIRQS)
46   -#define LOCKF_ENABLED_SOFTIRQS (1 << LOCK_ENABLED_SOFTIRQS)
47   -
48   -#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
49   -#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
50   -
51   -#define LOCKF_USED_IN_HARDIRQ_READ (1 << LOCK_USED_IN_HARDIRQ_READ)
52   -#define LOCKF_USED_IN_SOFTIRQ_READ (1 << LOCK_USED_IN_SOFTIRQ_READ)
53   -#define LOCKF_ENABLED_HARDIRQS_READ (1 << LOCK_ENABLED_HARDIRQS_READ)
54   -#define LOCKF_ENABLED_SOFTIRQS_READ (1 << LOCK_ENABLED_SOFTIRQS_READ)
55   -
56   -#define LOCKF_ENABLED_IRQS_READ \
57   - (LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
58   -#define LOCKF_USED_IN_IRQ_READ \
59   - (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
60   -
61 28 #define MAX_LOCKDEP_SUBCLASSES 8UL
62 29  
63 30 /*
... ... @@ -97,7 +64,7 @@
97 64 * IRQ/softirq usage tracking bits:
98 65 */
99 66 unsigned long usage_mask;
100   - struct stack_trace usage_traces[LOCK_USAGE_STATES];
  67 + struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES];
101 68  
102 69 /*
103 70 * These fields represent a directed graph of lock dependencies,
104 71  
... ... @@ -324,8 +291,12 @@
324 291 lock_set_class(lock, lock->name, lock->key, subclass, ip);
325 292 }
326 293  
327   -# define INIT_LOCKDEP .lockdep_recursion = 0,
  294 +extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
  295 +extern void lockdep_clear_current_reclaim_state(void);
  296 +extern void lockdep_trace_alloc(gfp_t mask);
328 297  
  298 +# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
  299 +
329 300 #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
330 301  
331 302 #else /* !LOCKDEP */
... ... @@ -342,6 +313,9 @@
342 313 # define lock_release(l, n, i) do { } while (0)
343 314 # define lock_set_class(l, n, k, s, i) do { } while (0)
344 315 # define lock_set_subclass(l, s, i) do { } while (0)
  316 +# define lockdep_set_current_reclaim_state(g) do { } while (0)
  317 +# define lockdep_clear_current_reclaim_state() do { } while (0)
  318 +# define lockdep_trace_alloc(g) do { } while (0)
345 319 # define lockdep_init() do { } while (0)
346 320 # define lockdep_info() do { } while (0)
347 321 # define lockdep_init_map(lock, name, key, sub) \
include/linux/mutex.h
... ... @@ -50,8 +50,10 @@
50 50 atomic_t count;
51 51 spinlock_t wait_lock;
52 52 struct list_head wait_list;
53   -#ifdef CONFIG_DEBUG_MUTEXES
  53 +#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
54 54 struct thread_info *owner;
  55 +#endif
  56 +#ifdef CONFIG_DEBUG_MUTEXES
55 57 const char *name;
56 58 void *magic;
57 59 #endif
... ... @@ -68,7 +70,6 @@
68 70 struct list_head list;
69 71 struct task_struct *task;
70 72 #ifdef CONFIG_DEBUG_MUTEXES
71   - struct mutex *lock;
72 73 void *magic;
73 74 #endif
74 75 };
include/linux/sched.h
... ... @@ -333,7 +333,9 @@
333 333 extern signed long schedule_timeout_interruptible(signed long timeout);
334 334 extern signed long schedule_timeout_killable(signed long timeout);
335 335 extern signed long schedule_timeout_uninterruptible(signed long timeout);
  336 +asmlinkage void __schedule(void);
336 337 asmlinkage void schedule(void);
  338 +extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
337 339  
338 340 struct nsproxy;
339 341 struct user_namespace;
... ... @@ -1330,6 +1332,7 @@
1330 1332 int lockdep_depth;
1331 1333 unsigned int lockdep_recursion;
1332 1334 struct held_lock held_locks[MAX_LOCK_DEPTH];
  1335 + gfp_t lockdep_reclaim_gfp;
1333 1336 #endif
1334 1337  
1335 1338 /* journalling filesystem info */
include/linux/timer.h
... ... @@ -5,6 +5,7 @@
5 5 #include <linux/ktime.h>
6 6 #include <linux/stddef.h>
7 7 #include <linux/debugobjects.h>
  8 +#include <linux/stringify.h>
8 9  
9 10 struct tvec_base;
10 11  
... ... @@ -21,52 +22,126 @@
21 22 char start_comm[16];
22 23 int start_pid;
23 24 #endif
  25 +#ifdef CONFIG_LOCKDEP
  26 + struct lockdep_map lockdep_map;
  27 +#endif
24 28 };
25 29  
26 30 extern struct tvec_base boot_tvec_bases;
27 31  
  32 +#ifdef CONFIG_LOCKDEP
  33 +/*
  34 + * NB: because we have to copy the lockdep_map, setting the lockdep_map key
  35 + * (second argument) here is required, otherwise it could be initialised to
  36 + * the copy of the lockdep_map later! We use the pointer to and the string
  37 + * "<file>:<line>" as the key resp. the name of the lockdep_map.
  38 + */
  39 +#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) \
  40 + .lockdep_map = STATIC_LOCKDEP_MAP_INIT(_kn, &_kn),
  41 +#else
  42 +#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn)
  43 +#endif
  44 +
28 45 #define TIMER_INITIALIZER(_function, _expires, _data) { \
29 46 .entry = { .prev = TIMER_ENTRY_STATIC }, \
30 47 .function = (_function), \
31 48 .expires = (_expires), \
32 49 .data = (_data), \
33 50 .base = &boot_tvec_bases, \
  51 + __TIMER_LOCKDEP_MAP_INITIALIZER( \
  52 + __FILE__ ":" __stringify(__LINE__)) \
34 53 }
35 54  
36 55 #define DEFINE_TIMER(_name, _function, _expires, _data) \
37 56 struct timer_list _name = \
38 57 TIMER_INITIALIZER(_function, _expires, _data)
39 58  
40   -void init_timer(struct timer_list *timer);
41   -void init_timer_deferrable(struct timer_list *timer);
  59 +void init_timer_key(struct timer_list *timer,
  60 + const char *name,
  61 + struct lock_class_key *key);
  62 +void init_timer_deferrable_key(struct timer_list *timer,
  63 + const char *name,
  64 + struct lock_class_key *key);
42 65  
  66 +#ifdef CONFIG_LOCKDEP
  67 +#define init_timer(timer) \
  68 + do { \
  69 + static struct lock_class_key __key; \
  70 + init_timer_key((timer), #timer, &__key); \
  71 + } while (0)
  72 +
  73 +#define init_timer_deferrable(timer) \
  74 + do { \
  75 + static struct lock_class_key __key; \
  76 + init_timer_deferrable_key((timer), #timer, &__key); \
  77 + } while (0)
  78 +
  79 +#define init_timer_on_stack(timer) \
  80 + do { \
  81 + static struct lock_class_key __key; \
  82 + init_timer_on_stack_key((timer), #timer, &__key); \
  83 + } while (0)
  84 +
  85 +#define setup_timer(timer, fn, data) \
  86 + do { \
  87 + static struct lock_class_key __key; \
  88 + setup_timer_key((timer), #timer, &__key, (fn), (data));\
  89 + } while (0)
  90 +
  91 +#define setup_timer_on_stack(timer, fn, data) \
  92 + do { \
  93 + static struct lock_class_key __key; \
  94 + setup_timer_on_stack_key((timer), #timer, &__key, \
  95 + (fn), (data)); \
  96 + } while (0)
  97 +#else
  98 +#define init_timer(timer)\
  99 + init_timer_key((timer), NULL, NULL)
  100 +#define init_timer_deferrable(timer)\
  101 + init_timer_deferrable_key((timer), NULL, NULL)
  102 +#define init_timer_on_stack(timer)\
  103 + init_timer_on_stack_key((timer), NULL, NULL)
  104 +#define setup_timer(timer, fn, data)\
  105 + setup_timer_key((timer), NULL, NULL, (fn), (data))
  106 +#define setup_timer_on_stack(timer, fn, data)\
  107 + setup_timer_on_stack_key((timer), NULL, NULL, (fn), (data))
  108 +#endif
  109 +
43 110 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
44   -extern void init_timer_on_stack(struct timer_list *timer);
  111 +extern void init_timer_on_stack_key(struct timer_list *timer,
  112 + const char *name,
  113 + struct lock_class_key *key);
45 114 extern void destroy_timer_on_stack(struct timer_list *timer);
46 115 #else
47 116 static inline void destroy_timer_on_stack(struct timer_list *timer) { }
48   -static inline void init_timer_on_stack(struct timer_list *timer)
  117 +static inline void init_timer_on_stack_key(struct timer_list *timer,
  118 + const char *name,
  119 + struct lock_class_key *key)
49 120 {
50   - init_timer(timer);
  121 + init_timer_key(timer, name, key);
51 122 }
52 123 #endif
53 124  
54   -static inline void setup_timer(struct timer_list * timer,
  125 +static inline void setup_timer_key(struct timer_list * timer,
  126 + const char *name,
  127 + struct lock_class_key *key,
55 128 void (*function)(unsigned long),
56 129 unsigned long data)
57 130 {
58 131 timer->function = function;
59 132 timer->data = data;
60   - init_timer(timer);
  133 + init_timer_key(timer, name, key);
61 134 }
62 135  
63   -static inline void setup_timer_on_stack(struct timer_list *timer,
  136 +static inline void setup_timer_on_stack_key(struct timer_list *timer,
  137 + const char *name,
  138 + struct lock_class_key *key,
64 139 void (*function)(unsigned long),
65 140 unsigned long data)
66 141 {
67 142 timer->function = function;
68 143 timer->data = data;
69   - init_timer_on_stack(timer);
  144 + init_timer_on_stack_key(timer, name, key);
70 145 }
71 146  
72 147 /**
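To make the point of the new _key variants concrete: under CONFIG_LOCKDEP every init_timer()/setup_timer() call site supplies its own static struct lock_class_key plus the stringified timer name, so timers initialised at different places land in distinct lockdep classes (DEFINE_TIMER uses the "<file>:<line>" string instead). A minimal, hypothetical user could look like this; demo_timer, demo_timeout and demo_start are invented names, not part of the patch:

        #include <linux/timer.h>
        #include <linux/jiffies.h>

        static struct timer_list demo_timer;

        static void demo_timeout(unsigned long data)
        {
                /* callback; runs inside demo_timer's lockdep_map (see kernel/timer.c below) */
        }

        static void demo_start(void)
        {
                /*
                 * With CONFIG_LOCKDEP this expands to
                 * setup_timer_key(&demo_timer, "&demo_timer", &__key, demo_timeout, 0),
                 * giving this call site its own lock class.
                 */
                setup_timer(&demo_timer, demo_timeout, 0);
                mod_timer(&demo_timer, jiffies + HZ);
        }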
kernel/lockdep.c
... ... @@ -41,6 +41,7 @@
41 41 #include <linux/utsname.h>
42 42 #include <linux/hash.h>
43 43 #include <linux/ftrace.h>
  44 +#include <linux/stringify.h>
44 45  
45 46 #include <asm/sections.h>
46 47  
47 48  
48 49  
... ... @@ -310,12 +311,14 @@
310 311 #if VERBOSE
311 312 # define HARDIRQ_VERBOSE 1
312 313 # define SOFTIRQ_VERBOSE 1
  314 +# define RECLAIM_VERBOSE 1
313 315 #else
314 316 # define HARDIRQ_VERBOSE 0
315 317 # define SOFTIRQ_VERBOSE 0
  318 +# define RECLAIM_VERBOSE 0
316 319 #endif
317 320  
318   -#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
  321 +#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
319 322 /*
320 323 * Quick filtering for interesting events:
321 324 */
322 325  
... ... @@ -443,17 +446,18 @@
443 446 * Locking printouts:
444 447 */
445 448  
  449 +#define __USAGE(__STATE) \
  450 + [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
  451 + [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
  452 + [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
  453 + [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
  454 +
446 455 static const char *usage_str[] =
447 456 {
448   - [LOCK_USED] = "initial-use ",
449   - [LOCK_USED_IN_HARDIRQ] = "in-hardirq-W",
450   - [LOCK_USED_IN_SOFTIRQ] = "in-softirq-W",
451   - [LOCK_ENABLED_SOFTIRQS] = "softirq-on-W",
452   - [LOCK_ENABLED_HARDIRQS] = "hardirq-on-W",
453   - [LOCK_USED_IN_HARDIRQ_READ] = "in-hardirq-R",
454   - [LOCK_USED_IN_SOFTIRQ_READ] = "in-softirq-R",
455   - [LOCK_ENABLED_SOFTIRQS_READ] = "softirq-on-R",
456   - [LOCK_ENABLED_HARDIRQS_READ] = "hardirq-on-R",
  457 +#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
  458 +#include "lockdep_states.h"
  459 +#undef LOCKDEP_STATE
  460 + [LOCK_USED] = "INITIAL USE",
457 461 };
458 462  
459 463 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
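For reference, __USAGE(HARDIRQ) above expands to the same four designated initializers the old hand-written table carried, only with upper-case strings:

        [LOCK_USED_IN_HARDIRQ]          = "IN-HARDIRQ-W",
        [LOCK_ENABLED_HARDIRQ]          = "HARDIRQ-ON-W",
        [LOCK_USED_IN_HARDIRQ_READ]     = "IN-HARDIRQ-R",
        [LOCK_ENABLED_HARDIRQ_READ]     = "HARDIRQ-ON-R",

and the SOFTIRQ and RECLAIM_FS entries are generated the same way by the lockdep_states.h include.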
... ... @@ -461,46 +465,45 @@
461 465 return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
462 466 }
463 467  
464   -void
465   -get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
  468 +static inline unsigned long lock_flag(enum lock_usage_bit bit)
466 469 {
467   - *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
  470 + return 1UL << bit;
  471 +}
468 472  
469   - if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
470   - *c1 = '+';
471   - else
472   - if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
473   - *c1 = '-';
  473 +static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
  474 +{
  475 + char c = '.';
474 476  
475   - if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
476   - *c2 = '+';
477   - else
478   - if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
479   - *c2 = '-';
480   -
481   - if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
482   - *c3 = '-';
483   - if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
484   - *c3 = '+';
485   - if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
486   - *c3 = '?';
  477 + if (class->usage_mask & lock_flag(bit + 2))
  478 + c = '+';
  479 + if (class->usage_mask & lock_flag(bit)) {
  480 + c = '-';
  481 + if (class->usage_mask & lock_flag(bit + 2))
  482 + c = '?';
487 483 }
488 484  
489   - if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
490   - *c4 = '-';
491   - if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
492   - *c4 = '+';
493   - if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
494   - *c4 = '?';
495   - }
  485 + return c;
496 486 }
497 487  
  488 +void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
  489 +{
  490 + int i = 0;
  491 +
  492 +#define LOCKDEP_STATE(__STATE) \
  493 + usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \
  494 + usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
  495 +#include "lockdep_states.h"
  496 +#undef LOCKDEP_STATE
  497 +
  498 + usage[i] = '\0';
  499 +}
  500 +
498 501 static void print_lock_name(struct lock_class *class)
499 502 {
500   - char str[KSYM_NAME_LEN], c1, c2, c3, c4;
  503 + char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
501 504 const char *name;
502 505  
503   - get_usage_chars(class, &c1, &c2, &c3, &c4);
  506 + get_usage_chars(class, usage);
504 507  
505 508 name = class->name;
506 509 if (!name) {
... ... @@ -513,7 +516,7 @@
513 516 if (class->subclass)
514 517 printk("/%d", class->subclass);
515 518 }
516   - printk("){%c%c%c%c}", c1, c2, c3, c4);
  519 + printk("){%s}", usage);
517 520 }
518 521  
519 522 static void print_lockdep_cache(struct lockdep_map *lock)
... ... @@ -1263,48 +1266,84 @@
1263 1266 bit_backwards, bit_forwards, irqclass);
1264 1267 }
1265 1268  
1266   -static int
1267   -check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1268   - struct held_lock *next)
  1269 +static const char *state_names[] = {
  1270 +#define LOCKDEP_STATE(__STATE) \
  1271 + __stringify(__STATE),
  1272 +#include "lockdep_states.h"
  1273 +#undef LOCKDEP_STATE
  1274 +};
  1275 +
  1276 +static const char *state_rnames[] = {
  1277 +#define LOCKDEP_STATE(__STATE) \
  1278 + __stringify(__STATE)"-READ",
  1279 +#include "lockdep_states.h"
  1280 +#undef LOCKDEP_STATE
  1281 +};
  1282 +
  1283 +static inline const char *state_name(enum lock_usage_bit bit)
1269 1284 {
  1285 + return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
  1286 +}
  1287 +
  1288 +static int exclusive_bit(int new_bit)
  1289 +{
1270 1290 /*
  1291 + * USED_IN
  1292 + * USED_IN_READ
  1293 + * ENABLED
  1294 + * ENABLED_READ
  1295 + *
  1296 + * bit 0 - write/read
  1297 + * bit 1 - used_in/enabled
  1298 + * bit 2+ state
  1299 + */
  1300 +
  1301 + int state = new_bit & ~3;
  1302 + int dir = new_bit & 2;
  1303 +
  1304 + /*
  1305 + * keep state, bit flip the direction and strip read.
  1306 + */
  1307 + return state | (dir ^ 2);
  1308 +}
  1309 +
  1310 +static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
  1311 + struct held_lock *next, enum lock_usage_bit bit)
  1312 +{
  1313 + /*
1271 1314 * Prove that the new dependency does not connect a hardirq-safe
1272 1315 * lock with a hardirq-unsafe lock - to achieve this we search
1273 1316 * the backwards-subgraph starting at <prev>, and the
1274 1317 * forwards-subgraph starting at <next>:
1275 1318 */
1276   - if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
1277   - LOCK_ENABLED_HARDIRQS, "hard"))
  1319 + if (!check_usage(curr, prev, next, bit,
  1320 + exclusive_bit(bit), state_name(bit)))
1278 1321 return 0;
1279 1322  
  1323 + bit++; /* _READ */
  1324 +
1280 1325 /*
1281 1326 * Prove that the new dependency does not connect a hardirq-safe-read
1282 1327 * lock with a hardirq-unsafe lock - to achieve this we search
1283 1328 * the backwards-subgraph starting at <prev>, and the
1284 1329 * forwards-subgraph starting at <next>:
1285 1330 */
1286   - if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
1287   - LOCK_ENABLED_HARDIRQS, "hard-read"))
  1331 + if (!check_usage(curr, prev, next, bit,
  1332 + exclusive_bit(bit), state_name(bit)))
1288 1333 return 0;
1289 1334  
1290   - /*
1291   - * Prove that the new dependency does not connect a softirq-safe
1292   - * lock with a softirq-unsafe lock - to achieve this we search
1293   - * the backwards-subgraph starting at <prev>, and the
1294   - * forwards-subgraph starting at <next>:
1295   - */
1296   - if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
1297   - LOCK_ENABLED_SOFTIRQS, "soft"))
  1335 + return 1;
  1336 +}
  1337 +
  1338 +static int
  1339 +check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
  1340 + struct held_lock *next)
  1341 +{
  1342 +#define LOCKDEP_STATE(__STATE) \
  1343 + if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
1298 1344 return 0;
1299   - /*
1300   - * Prove that the new dependency does not connect a softirq-safe-read
1301   - * lock with a softirq-unsafe lock - to achieve this we search
1302   - * the backwards-subgraph starting at <prev>, and the
1303   - * forwards-subgraph starting at <next>:
1304   - */
1305   - if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
1306   - LOCK_ENABLED_SOFTIRQS, "soft"))
1307   - return 0;
  1345 +#include "lockdep_states.h"
  1346 +#undef LOCKDEP_STATE
1308 1347  
1309 1348 return 1;
1310 1349 }
1311 1350  
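A couple of worked examples for exclusive_bit() above: with the enum in kernel/lockdep_internals.h laying each state out as USED_IN, USED_IN_READ, ENABLED, ENABLED_READ, the helper keeps the state bits, flips the used_in/enabled direction and strips the read bit, so

        exclusive_bit(LOCK_USED_IN_HARDIRQ)      == LOCK_ENABLED_HARDIRQ
        exclusive_bit(LOCK_USED_IN_HARDIRQ_READ) == LOCK_ENABLED_HARDIRQ
        exclusive_bit(LOCK_ENABLED_SOFTIRQ)      == LOCK_USED_IN_SOFTIRQ
        exclusive_bit(LOCK_ENABLED_SOFTIRQ_READ) == LOCK_USED_IN_SOFTIRQ

which is exactly the safe/unsafe pairing that check_irq_usage() feeds to check_usage().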
... ... @@ -1861,9 +1900,9 @@
1861 1900 curr->comm, task_pid_nr(curr));
1862 1901 print_lock(this);
1863 1902 if (forwards)
1864   - printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
  1903 + printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
1865 1904 else
1866   - printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
  1905 + printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
1867 1906 print_lock_name(other);
1868 1907 printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
1869 1908  
... ... @@ -1933,7 +1972,7 @@
1933 1972 print_ip_sym(curr->softirq_disable_ip);
1934 1973 }
1935 1974  
1936   -static int hardirq_verbose(struct lock_class *class)
  1975 +static int HARDIRQ_verbose(struct lock_class *class)
1937 1976 {
1938 1977 #if HARDIRQ_VERBOSE
1939 1978 return class_filter(class);
... ... @@ -1941,7 +1980,7 @@
1941 1980 return 0;
1942 1981 }
1943 1982  
1944   -static int softirq_verbose(struct lock_class *class)
  1983 +static int SOFTIRQ_verbose(struct lock_class *class)
1945 1984 {
1946 1985 #if SOFTIRQ_VERBOSE
1947 1986 return class_filter(class);
... ... @@ -1949,185 +1988,95 @@
1949 1988 return 0;
1950 1989 }
1951 1990  
  1991 +static int RECLAIM_FS_verbose(struct lock_class *class)
  1992 +{
  1993 +#if RECLAIM_VERBOSE
  1994 + return class_filter(class);
  1995 +#endif
  1996 + return 0;
  1997 +}
  1998 +
1952 1999 #define STRICT_READ_CHECKS 1
1953 2000  
1954   -static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
  2001 +static int (*state_verbose_f[])(struct lock_class *class) = {
  2002 +#define LOCKDEP_STATE(__STATE) \
  2003 + __STATE##_verbose,
  2004 +#include "lockdep_states.h"
  2005 +#undef LOCKDEP_STATE
  2006 +};
  2007 +
  2008 +static inline int state_verbose(enum lock_usage_bit bit,
  2009 + struct lock_class *class)
  2010 +{
  2011 + return state_verbose_f[bit >> 2](class);
  2012 +}
  2013 +
  2014 +typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
  2015 + enum lock_usage_bit bit, const char *name);
  2016 +
  2017 +static int
  2018 +mark_lock_irq(struct task_struct *curr, struct held_lock *this,
1955 2019 enum lock_usage_bit new_bit)
1956 2020 {
1957   - int ret = 1;
  2021 + int excl_bit = exclusive_bit(new_bit);
  2022 + int read = new_bit & 1;
  2023 + int dir = new_bit & 2;
1958 2024  
1959   - switch(new_bit) {
1960   - case LOCK_USED_IN_HARDIRQ:
1961   - if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
  2025 + /*
  2026 + * mark USED_IN has to look forwards -- to ensure no dependency
  2027 + * has ENABLED state, which would allow recursion deadlocks.
  2028 + *
  2029 + * mark ENABLED has to look backwards -- to ensure no dependee
  2030 + * has USED_IN state, which, again, would allow recursion deadlocks.
  2031 + */
  2032 + check_usage_f usage = dir ?
  2033 + check_usage_backwards : check_usage_forwards;
  2034 +
  2035 + /*
  2036 + * Validate that this particular lock does not have conflicting
  2037 + * usage states.
  2038 + */
  2039 + if (!valid_state(curr, this, new_bit, excl_bit))
  2040 + return 0;
  2041 +
  2042 + /*
  2043 + * Validate that the lock dependencies don't have conflicting usage
  2044 + * states.
  2045 + */
  2046 + if ((!read || !dir || STRICT_READ_CHECKS) &&
  2047 + !usage(curr, this, excl_bit, state_name(new_bit & ~1)))
  2048 + return 0;
  2049 +
  2050 + /*
  2051 + * Check for read in write conflicts
  2052 + */
  2053 + if (!read) {
  2054 + if (!valid_state(curr, this, new_bit, excl_bit + 1))
1962 2055 return 0;
1963   - if (!valid_state(curr, this, new_bit,
1964   - LOCK_ENABLED_HARDIRQS_READ))
  2056 +
  2057 + if (STRICT_READ_CHECKS &&
  2058 + !usage(curr, this, excl_bit + 1,
  2059 + state_name(new_bit + 1)))
1965 2060 return 0;
1966   - /*
1967   - * just marked it hardirq-safe, check that this lock
1968   - * took no hardirq-unsafe lock in the past:
1969   - */
1970   - if (!check_usage_forwards(curr, this,
1971   - LOCK_ENABLED_HARDIRQS, "hard"))
1972   - return 0;
1973   -#if STRICT_READ_CHECKS
1974   - /*
1975   - * just marked it hardirq-safe, check that this lock
1976   - * took no hardirq-unsafe-read lock in the past:
1977   - */
1978   - if (!check_usage_forwards(curr, this,
1979   - LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
1980   - return 0;
1981   -#endif
1982   - if (hardirq_verbose(hlock_class(this)))
1983   - ret = 2;
1984   - break;
1985   - case LOCK_USED_IN_SOFTIRQ:
1986   - if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
1987   - return 0;
1988   - if (!valid_state(curr, this, new_bit,
1989   - LOCK_ENABLED_SOFTIRQS_READ))
1990   - return 0;
1991   - /*
1992   - * just marked it softirq-safe, check that this lock
1993   - * took no softirq-unsafe lock in the past:
1994   - */
1995   - if (!check_usage_forwards(curr, this,
1996   - LOCK_ENABLED_SOFTIRQS, "soft"))
1997   - return 0;
1998   -#if STRICT_READ_CHECKS
1999   - /*
2000   - * just marked it softirq-safe, check that this lock
2001   - * took no softirq-unsafe-read lock in the past:
2002   - */
2003   - if (!check_usage_forwards(curr, this,
2004   - LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
2005   - return 0;
2006   -#endif
2007   - if (softirq_verbose(hlock_class(this)))
2008   - ret = 2;
2009   - break;
2010   - case LOCK_USED_IN_HARDIRQ_READ:
2011   - if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
2012   - return 0;
2013   - /*
2014   - * just marked it hardirq-read-safe, check that this lock
2015   - * took no hardirq-unsafe lock in the past:
2016   - */
2017   - if (!check_usage_forwards(curr, this,
2018   - LOCK_ENABLED_HARDIRQS, "hard"))
2019   - return 0;
2020   - if (hardirq_verbose(hlock_class(this)))
2021   - ret = 2;
2022   - break;
2023   - case LOCK_USED_IN_SOFTIRQ_READ:
2024   - if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
2025   - return 0;
2026   - /*
2027   - * just marked it softirq-read-safe, check that this lock
2028   - * took no softirq-unsafe lock in the past:
2029   - */
2030   - if (!check_usage_forwards(curr, this,
2031   - LOCK_ENABLED_SOFTIRQS, "soft"))
2032   - return 0;
2033   - if (softirq_verbose(hlock_class(this)))
2034   - ret = 2;
2035   - break;
2036   - case LOCK_ENABLED_HARDIRQS:
2037   - if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
2038   - return 0;
2039   - if (!valid_state(curr, this, new_bit,
2040   - LOCK_USED_IN_HARDIRQ_READ))
2041   - return 0;
2042   - /*
2043   - * just marked it hardirq-unsafe, check that no hardirq-safe
2044   - * lock in the system ever took it in the past:
2045   - */
2046   - if (!check_usage_backwards(curr, this,
2047   - LOCK_USED_IN_HARDIRQ, "hard"))
2048   - return 0;
2049   -#if STRICT_READ_CHECKS
2050   - /*
2051   - * just marked it hardirq-unsafe, check that no
2052   - * hardirq-safe-read lock in the system ever took
2053   - * it in the past:
2054   - */
2055   - if (!check_usage_backwards(curr, this,
2056   - LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
2057   - return 0;
2058   -#endif
2059   - if (hardirq_verbose(hlock_class(this)))
2060   - ret = 2;
2061   - break;
2062   - case LOCK_ENABLED_SOFTIRQS:
2063   - if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
2064   - return 0;
2065   - if (!valid_state(curr, this, new_bit,
2066   - LOCK_USED_IN_SOFTIRQ_READ))
2067   - return 0;
2068   - /*
2069   - * just marked it softirq-unsafe, check that no softirq-safe
2070   - * lock in the system ever took it in the past:
2071   - */
2072   - if (!check_usage_backwards(curr, this,
2073   - LOCK_USED_IN_SOFTIRQ, "soft"))
2074   - return 0;
2075   -#if STRICT_READ_CHECKS
2076   - /*
2077   - * just marked it softirq-unsafe, check that no
2078   - * softirq-safe-read lock in the system ever took
2079   - * it in the past:
2080   - */
2081   - if (!check_usage_backwards(curr, this,
2082   - LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
2083   - return 0;
2084   -#endif
2085   - if (softirq_verbose(hlock_class(this)))
2086   - ret = 2;
2087   - break;
2088   - case LOCK_ENABLED_HARDIRQS_READ:
2089   - if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
2090   - return 0;
2091   -#if STRICT_READ_CHECKS
2092   - /*
2093   - * just marked it hardirq-read-unsafe, check that no
2094   - * hardirq-safe lock in the system ever took it in the past:
2095   - */
2096   - if (!check_usage_backwards(curr, this,
2097   - LOCK_USED_IN_HARDIRQ, "hard"))
2098   - return 0;
2099   -#endif
2100   - if (hardirq_verbose(hlock_class(this)))
2101   - ret = 2;
2102   - break;
2103   - case LOCK_ENABLED_SOFTIRQS_READ:
2104   - if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
2105   - return 0;
2106   -#if STRICT_READ_CHECKS
2107   - /*
2108   - * just marked it softirq-read-unsafe, check that no
2109   - * softirq-safe lock in the system ever took it in the past:
2110   - */
2111   - if (!check_usage_backwards(curr, this,
2112   - LOCK_USED_IN_SOFTIRQ, "soft"))
2113   - return 0;
2114   -#endif
2115   - if (softirq_verbose(hlock_class(this)))
2116   - ret = 2;
2117   - break;
2118   - default:
2119   - WARN_ON(1);
2120   - break;
2121 2061 }
2122 2062  
2123   - return ret;
  2063 + if (state_verbose(new_bit, hlock_class(this)))
  2064 + return 2;
  2065 +
  2066 + return 1;
2124 2067 }
2125 2068  
  2069 +enum mark_type {
  2070 +#define LOCKDEP_STATE(__STATE) __STATE,
  2071 +#include "lockdep_states.h"
  2072 +#undef LOCKDEP_STATE
  2073 +};
  2074 +
2126 2075 /*
2127 2076 * Mark all held locks with a usage bit:
2128 2077 */
2129 2078 static int
2130   -mark_held_locks(struct task_struct *curr, int hardirq)
  2079 +mark_held_locks(struct task_struct *curr, enum mark_type mark)
2131 2080 {
2132 2081 enum lock_usage_bit usage_bit;
2133 2082 struct held_lock *hlock;
... ... @@ -2136,17 +2085,12 @@
2136 2085 for (i = 0; i < curr->lockdep_depth; i++) {
2137 2086 hlock = curr->held_locks + i;
2138 2087  
2139   - if (hardirq) {
2140   - if (hlock->read)
2141   - usage_bit = LOCK_ENABLED_HARDIRQS_READ;
2142   - else
2143   - usage_bit = LOCK_ENABLED_HARDIRQS;
2144   - } else {
2145   - if (hlock->read)
2146   - usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
2147   - else
2148   - usage_bit = LOCK_ENABLED_SOFTIRQS;
2149   - }
  2088 + usage_bit = 2 + (mark << 2); /* ENABLED */
  2089 + if (hlock->read)
  2090 + usage_bit += 1; /* READ */
  2091 +
  2092 + BUG_ON(usage_bit >= LOCK_USAGE_STATES);
  2093 +
2150 2094 if (!mark_lock(curr, hlock, usage_bit))
2151 2095 return 0;
2152 2096 }
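The usage_bit arithmetic in mark_held_locks() leans on the same four-bits-per-state layout: 2 + (mark << 2) selects the ENABLED bit of the given state, and the optional +1 selects its _READ variant, i.e.

        2 + (HARDIRQ    << 2) == LOCK_ENABLED_HARDIRQ
        2 + (SOFTIRQ    << 2) == LOCK_ENABLED_SOFTIRQ
        2 + (RECLAIM_FS << 2) == LOCK_ENABLED_RECLAIM_FS

with the BUG_ON() guarding against a mark_type value that has no matching block in the enum.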
... ... @@ -2200,7 +2144,7 @@
2200 2144 * We are going to turn hardirqs on, so set the
2201 2145 * usage bit for all held locks:
2202 2146 */
2203   - if (!mark_held_locks(curr, 1))
  2147 + if (!mark_held_locks(curr, HARDIRQ))
2204 2148 return;
2205 2149 /*
2206 2150 * If we have softirqs enabled, then set the usage
... ... @@ -2208,7 +2152,7 @@
2208 2152 * this bit from being set before)
2209 2153 */
2210 2154 if (curr->softirqs_enabled)
2211   - if (!mark_held_locks(curr, 0))
  2155 + if (!mark_held_locks(curr, SOFTIRQ))
2212 2156 return;
2213 2157  
2214 2158 curr->hardirq_enable_ip = ip;
... ... @@ -2288,7 +2232,7 @@
2288 2232 * enabled too:
2289 2233 */
2290 2234 if (curr->hardirqs_enabled)
2291   - mark_held_locks(curr, 0);
  2235 + mark_held_locks(curr, SOFTIRQ);
2292 2236 }
2293 2237  
2294 2238 /*
... ... @@ -2317,6 +2261,31 @@
2317 2261 debug_atomic_inc(&redundant_softirqs_off);
2318 2262 }
2319 2263  
  2264 +void lockdep_trace_alloc(gfp_t gfp_mask)
  2265 +{
  2266 + struct task_struct *curr = current;
  2267 +
  2268 + if (unlikely(!debug_locks))
  2269 + return;
  2270 +
  2271 + /* no reclaim without waiting on it */
  2272 + if (!(gfp_mask & __GFP_WAIT))
  2273 + return;
  2274 +
  2275 + /* this guy won't enter reclaim */
  2276 + if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
  2277 + return;
  2278 +
  2279 + /* We're only interested in __GFP_FS allocations for now */
  2280 + if (!(gfp_mask & __GFP_FS))
  2281 + return;
  2282 +
  2283 + if (DEBUG_LOCKS_WARN_ON(irqs_disabled()))
  2284 + return;
  2285 +
  2286 + mark_held_locks(curr, RECLAIM_FS);
  2287 +}
  2288 +
2320 2289 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2321 2290 {
2322 2291 /*
2323 2292  
2324 2293  
2325 2294  
2326 2295  
... ... @@ -2345,23 +2314,39 @@
2345 2314 if (!hlock->hardirqs_off) {
2346 2315 if (hlock->read) {
2347 2316 if (!mark_lock(curr, hlock,
2348   - LOCK_ENABLED_HARDIRQS_READ))
  2317 + LOCK_ENABLED_HARDIRQ_READ))
2349 2318 return 0;
2350 2319 if (curr->softirqs_enabled)
2351 2320 if (!mark_lock(curr, hlock,
2352   - LOCK_ENABLED_SOFTIRQS_READ))
  2321 + LOCK_ENABLED_SOFTIRQ_READ))
2353 2322 return 0;
2354 2323 } else {
2355 2324 if (!mark_lock(curr, hlock,
2356   - LOCK_ENABLED_HARDIRQS))
  2325 + LOCK_ENABLED_HARDIRQ))
2357 2326 return 0;
2358 2327 if (curr->softirqs_enabled)
2359 2328 if (!mark_lock(curr, hlock,
2360   - LOCK_ENABLED_SOFTIRQS))
  2329 + LOCK_ENABLED_SOFTIRQ))
2361 2330 return 0;
2362 2331 }
2363 2332 }
2364 2333  
  2334 + /*
  2335 + * We reuse the irq context infrastructure more broadly as a general
  2336 + * context checking code. This tests GFP_FS recursion (a lock taken
  2337 + * during reclaim for a GFP_FS allocation is held over a GFP_FS
  2338 + * allocation).
  2339 + */
  2340 + if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
  2341 + if (hlock->read) {
  2342 + if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
  2343 + return 0;
  2344 + } else {
  2345 + if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
  2346 + return 0;
  2347 + }
  2348 + }
  2349 +
2365 2350 return 1;
2366 2351 }
2367 2352  
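To illustrate what the new RECLAIM_FS state is after, here is a deliberately simplified sketch; fs_lock, writeback_path() and regular_path() are invented for this example and are not part of the patch:

        #include <linux/mutex.h>
        #include <linux/slab.h>

        static DEFINE_MUTEX(fs_lock);

        /* called from the reclaim path, i.e. with the reclaim state set */
        static void writeback_path(void)
        {
                mutex_lock(&fs_lock);   /* fs_lock becomes USED_IN_RECLAIM_FS */
                /* ... write back pages ... */
                mutex_unlock(&fs_lock);
        }

        static void regular_path(void)
        {
                void *p;

                mutex_lock(&fs_lock);
                p = kmalloc(128, GFP_KERNEL);   /* __GFP_FS allocation under fs_lock:   */
                                                /* lockdep_trace_alloc() marks fs_lock  */
                                                /* ENABLED_RECLAIM_FS, which conflicts  */
                                                /* with the USED_IN_RECLAIM_FS usage    */
                kfree(p);
                mutex_unlock(&fs_lock);
        }

The allocation in regular_path() may itself enter reclaim and reach writeback_path(), which would then block on fs_lock forever; the new annotations let lockdep report that inversion without it ever having to happen.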
... ... @@ -2412,6 +2397,10 @@
2412 2397 return 0;
2413 2398 }
2414 2399  
  2400 +void lockdep_trace_alloc(gfp_t gfp_mask)
  2401 +{
  2402 +}
  2403 +
2415 2404 #endif
2416 2405  
2417 2406 /*
... ... @@ -2445,14 +2434,13 @@
2445 2434 return 0;
2446 2435  
2447 2436 switch (new_bit) {
2448   - case LOCK_USED_IN_HARDIRQ:
2449   - case LOCK_USED_IN_SOFTIRQ:
2450   - case LOCK_USED_IN_HARDIRQ_READ:
2451   - case LOCK_USED_IN_SOFTIRQ_READ:
2452   - case LOCK_ENABLED_HARDIRQS:
2453   - case LOCK_ENABLED_SOFTIRQS:
2454   - case LOCK_ENABLED_HARDIRQS_READ:
2455   - case LOCK_ENABLED_SOFTIRQS_READ:
  2437 +#define LOCKDEP_STATE(__STATE) \
  2438 + case LOCK_USED_IN_##__STATE: \
  2439 + case LOCK_USED_IN_##__STATE##_READ: \
  2440 + case LOCK_ENABLED_##__STATE: \
  2441 + case LOCK_ENABLED_##__STATE##_READ:
  2442 +#include "lockdep_states.h"
  2443 +#undef LOCKDEP_STATE
2456 2444 ret = mark_lock_irq(curr, this, new_bit);
2457 2445 if (!ret)
2458 2446 return 0;
... ... @@ -2965,6 +2953,16 @@
2965 2953 raw_local_irq_restore(flags);
2966 2954 }
2967 2955 EXPORT_SYMBOL_GPL(lock_release);
  2956 +
  2957 +void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
  2958 +{
  2959 + current->lockdep_reclaim_gfp = gfp_mask;
  2960 +}
  2961 +
  2962 +void lockdep_clear_current_reclaim_state(void)
  2963 +{
  2964 + current->lockdep_reclaim_gfp = 0;
  2965 +}
2968 2966  
2969 2967 #ifdef CONFIG_LOCK_STAT
2970 2968 static int
kernel/lockdep_internals.h
... ... @@ -7,6 +7,45 @@
7 7 */
8 8  
9 9 /*
  10 + * Lock-class usage-state bits:
  11 + */
  12 +enum lock_usage_bit {
  13 +#define LOCKDEP_STATE(__STATE) \
  14 + LOCK_USED_IN_##__STATE, \
  15 + LOCK_USED_IN_##__STATE##_READ, \
  16 + LOCK_ENABLED_##__STATE, \
  17 + LOCK_ENABLED_##__STATE##_READ,
  18 +#include "lockdep_states.h"
  19 +#undef LOCKDEP_STATE
  20 + LOCK_USED,
  21 + LOCK_USAGE_STATES
  22 +};
  23 +
  24 +/*
  25 + * Usage-state bitmasks:
  26 + */
  27 +#define __LOCKF(__STATE) LOCKF_##__STATE = (1 << LOCK_##__STATE),
  28 +
  29 +enum {
  30 +#define LOCKDEP_STATE(__STATE) \
  31 + __LOCKF(USED_IN_##__STATE) \
  32 + __LOCKF(USED_IN_##__STATE##_READ) \
  33 + __LOCKF(ENABLED_##__STATE) \
  34 + __LOCKF(ENABLED_##__STATE##_READ)
  35 +#include "lockdep_states.h"
  36 +#undef LOCKDEP_STATE
  37 + __LOCKF(USED)
  38 +};
  39 +
  40 +#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
  41 +#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
  42 +
  43 +#define LOCKF_ENABLED_IRQ_READ \
  44 + (LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
  45 +#define LOCKF_USED_IN_IRQ_READ \
  46 + (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
  47 +
  48 +/*
10 49 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
11 50 * we track.
12 51 *
... ... @@ -31,8 +70,10 @@
31 70 extern struct list_head all_lock_classes;
32 71 extern struct lock_chain lock_chains[];
33 72  
34   -extern void
35   -get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4);
  73 +#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)
  74 +
  75 +extern void get_usage_chars(struct lock_class *class,
  76 + char usage[LOCK_USAGE_CHARS]);
36 77  
37 78 extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);
38 79  
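Spelled out, the generated enum places HARDIRQ in bits 0-3, SOFTIRQ in bits 4-7 and RECLAIM_FS in bits 8-11:

        LOCK_USED_IN_HARDIRQ         =  0
        LOCK_USED_IN_HARDIRQ_READ    =  1
        LOCK_ENABLED_HARDIRQ         =  2
        LOCK_ENABLED_HARDIRQ_READ    =  3
        LOCK_USED_IN_SOFTIRQ         =  4
        ...
        LOCK_ENABLED_RECLAIM_FS_READ = 11
        LOCK_USED                    = 12
        LOCK_USAGE_STATES            = 13

so LOCK_USAGE_CHARS works out to 1 + 13/2 = 7: six usage characters plus the terminating NUL that get_usage_chars() appends, and XXX_LOCK_USAGE_STATES (1 + 3*4) in include/linux/lockdep.h matches LOCK_USAGE_STATES.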
kernel/lockdep_proc.c
... ... @@ -84,7 +84,7 @@
84 84 {
85 85 struct lock_class *class = v;
86 86 struct lock_list *entry;
87   - char c1, c2, c3, c4;
  87 + char usage[LOCK_USAGE_CHARS];
88 88  
89 89 if (v == SEQ_START_TOKEN) {
90 90 seq_printf(m, "all lock classes:\n");
... ... @@ -100,8 +100,8 @@
100 100 seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
101 101 #endif
102 102  
103   - get_usage_chars(class, &c1, &c2, &c3, &c4);
104   - seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);
  103 + get_usage_chars(class, usage);
  104 + seq_printf(m, " %s", usage);
105 105  
106 106 seq_printf(m, ": ");
107 107 print_name(m, class);
... ... @@ -300,27 +300,27 @@
300 300 nr_uncategorized++;
301 301 if (class->usage_mask & LOCKF_USED_IN_IRQ)
302 302 nr_irq_safe++;
303   - if (class->usage_mask & LOCKF_ENABLED_IRQS)
  303 + if (class->usage_mask & LOCKF_ENABLED_IRQ)
304 304 nr_irq_unsafe++;
305 305 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
306 306 nr_softirq_safe++;
307   - if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
  307 + if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
308 308 nr_softirq_unsafe++;
309 309 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
310 310 nr_hardirq_safe++;
311   - if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
  311 + if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
312 312 nr_hardirq_unsafe++;
313 313 if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
314 314 nr_irq_read_safe++;
315   - if (class->usage_mask & LOCKF_ENABLED_IRQS_READ)
  315 + if (class->usage_mask & LOCKF_ENABLED_IRQ_READ)
316 316 nr_irq_read_unsafe++;
317 317 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
318 318 nr_softirq_read_safe++;
319   - if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
  319 + if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
320 320 nr_softirq_read_unsafe++;
321 321 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
322 322 nr_hardirq_read_safe++;
323   - if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
  323 + if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
324 324 nr_hardirq_read_unsafe++;
325 325  
326 326 #ifdef CONFIG_PROVE_LOCKING
... ... @@ -601,6 +601,10 @@
601 601 static void seq_header(struct seq_file *m)
602 602 {
603 603 seq_printf(m, "lock_stat version 0.3\n");
  604 +
  605 + if (unlikely(!debug_locks))
  606 + seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");
  607 +
604 608 seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
605 609 seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
606 610 "%14s %14s\n",
kernel/lockdep_states.h
  1 +/*
  2 + * Lockdep states,
  3 + *
  4 + * please update XXX_LOCK_USAGE_STATES in include/linux/lockdep.h whenever
  5 + * you add one, or come up with a nice dynamic solution.
  6 + */
  7 +LOCKDEP_STATE(HARDIRQ)
  8 +LOCKDEP_STATE(SOFTIRQ)
  9 +LOCKDEP_STATE(RECLAIM_FS)
kernel/mutex-debug.c
... ... @@ -26,11 +26,6 @@
26 26 /*
27 27 * Must be called with lock->wait_lock held.
28 28 */
29   -void debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner)
30   -{
31   - lock->owner = new_owner;
32   -}
33   -
34 29 void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
35 30 {
36 31 memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
... ... @@ -59,7 +54,6 @@
59 54  
60 55 /* Mark the current thread as blocked on the lock: */
61 56 ti->task->blocked_on = waiter;
62   - waiter->lock = lock;
63 57 }
64 58  
65 59 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
... ... @@ -82,7 +76,7 @@
82 76 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
83 77 DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
84 78 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
85   - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
  79 + mutex_clear_owner(lock);
86 80 }
87 81  
88 82 void debug_mutex_init(struct mutex *lock, const char *name,
... ... @@ -95,7 +89,6 @@
95 89 debug_check_no_locks_freed((void *)lock, sizeof(*lock));
96 90 lockdep_init_map(&lock->dep_map, name, key, 0);
97 91 #endif
98   - lock->owner = NULL;
99 92 lock->magic = lock;
100 93 }
101 94  
kernel/mutex-debug.h
... ... @@ -13,14 +13,6 @@
13 13 /*
14 14 * This must be called with lock->wait_lock held.
15 15 */
16   -extern void
17   -debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner);
18   -
19   -static inline void debug_mutex_clear_owner(struct mutex *lock)
20   -{
21   - lock->owner = NULL;
22   -}
23   -
24 16 extern void debug_mutex_lock_common(struct mutex *lock,
25 17 struct mutex_waiter *waiter);
26 18 extern void debug_mutex_wake_waiter(struct mutex *lock,
... ... @@ -34,6 +26,16 @@
34 26 extern void debug_mutex_unlock(struct mutex *lock);
35 27 extern void debug_mutex_init(struct mutex *lock, const char *name,
36 28 struct lock_class_key *key);
  29 +
  30 +static inline void mutex_set_owner(struct mutex *lock)
  31 +{
  32 + lock->owner = current_thread_info();
  33 +}
  34 +
  35 +static inline void mutex_clear_owner(struct mutex *lock)
  36 +{
  37 + lock->owner = NULL;
  38 +}
37 39  
38 40 #define spin_lock_mutex(lock, flags) \
39 41 do { \
kernel/mutex.c
... ... @@ -10,6 +10,11 @@
10 10 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
11 11 * David Howells for suggestions and improvements.
12 12 *
  13 + * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
  14 + * from the -rt tree, where it was originally implemented for rtmutexes
  15 + * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
  16 + * and Sven Dietrich.
  17 + *
13 18 * Also see Documentation/mutex-design.txt.
14 19 */
15 20 #include <linux/mutex.h>
... ... @@ -46,6 +51,7 @@
46 51 atomic_set(&lock->count, 1);
47 52 spin_lock_init(&lock->wait_lock);
48 53 INIT_LIST_HEAD(&lock->wait_list);
  54 + mutex_clear_owner(lock);
49 55  
50 56 debug_mutex_init(lock, name, key);
51 57 }
... ... @@ -91,6 +97,7 @@
91 97 * 'unlocked' into 'locked' state.
92 98 */
93 99 __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
  100 + mutex_set_owner(lock);
94 101 }
95 102  
96 103 EXPORT_SYMBOL(mutex_lock);
... ... @@ -115,6 +122,14 @@
115 122 * The unlocking fastpath is the 0->1 transition from 'locked'
116 123 * into 'unlocked' state:
117 124 */
  125 +#ifndef CONFIG_DEBUG_MUTEXES
  126 + /*
  127 + * When debugging is enabled we must not clear the owner before time,
  128 + * the slow path will always be taken, and that clears the owner field
  129 + * after verifying that it was indeed current.
  130 + */
  131 + mutex_clear_owner(lock);
  132 +#endif
118 133 __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
119 134 }
120 135  
121 136  
122 137  
123 138  
... ... @@ -129,21 +144,75 @@
129 144 {
130 145 struct task_struct *task = current;
131 146 struct mutex_waiter waiter;
132   - unsigned int old_val;
133 147 unsigned long flags;
134 148  
  149 + preempt_disable();
  150 + mutex_acquire(&lock->dep_map, subclass, 0, ip);
  151 +#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
  152 + /*
  153 + * Optimistic spinning.
  154 + *
  155 + * We try to spin for acquisition when we find that there are no
  156 + * pending waiters and the lock owner is currently running on a
  157 + * (different) CPU.
  158 + *
  159 + * The rationale is that if the lock owner is running, it is likely to
  160 + * release the lock soon.
  161 + *
  162 + * Since this needs the lock owner, and this mutex implementation
  163 + * doesn't track the owner atomically in the lock field, we need to
  164 + * track it non-atomically.
  165 + *
  166 + * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
  167 + * to serialize everything.
  168 + */
  169 +
  170 + for (;;) {
  171 + struct thread_info *owner;
  172 +
  173 + /*
  174 + * If there's an owner, wait for it to either
  175 + * release the lock or go to sleep.
  176 + */
  177 + owner = ACCESS_ONCE(lock->owner);
  178 + if (owner && !mutex_spin_on_owner(lock, owner))
  179 + break;
  180 +
  181 + if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
  182 + lock_acquired(&lock->dep_map, ip);
  183 + mutex_set_owner(lock);
  184 + preempt_enable();
  185 + return 0;
  186 + }
  187 +
  188 + /*
  189 + * When there's no owner, we might have preempted between the
  190 + * owner acquiring the lock and setting the owner field. If
  191 + * we're an RT task that will live-lock because we won't let
  192 + * the owner complete.
  193 + */
  194 + if (!owner && (need_resched() || rt_task(task)))
  195 + break;
  196 +
  197 + /*
  198 + * The cpu_relax() call is a compiler barrier which forces
  199 + * everything in this loop to be re-loaded. We don't need
  200 + * memory barriers as we'll eventually observe the right
  201 + * values at the cost of a few extra spins.
  202 + */
  203 + cpu_relax();
  204 + }
  205 +#endif
135 206 spin_lock_mutex(&lock->wait_lock, flags);
136 207  
137 208 debug_mutex_lock_common(lock, &waiter);
138   - mutex_acquire(&lock->dep_map, subclass, 0, ip);
139 209 debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
140 210  
141 211 /* add waiting tasks to the end of the waitqueue (FIFO): */
142 212 list_add_tail(&waiter.list, &lock->wait_list);
143 213 waiter.task = task;
144 214  
145   - old_val = atomic_xchg(&lock->count, -1);
146   - if (old_val == 1)
  215 + if (atomic_xchg(&lock->count, -1) == 1)
147 216 goto done;
148 217  
149 218 lock_contended(&lock->dep_map, ip);
... ... @@ -158,8 +227,7 @@
158 227 * that when we release the lock, we properly wake up the
159 228 * other waiters:
160 229 */
161   - old_val = atomic_xchg(&lock->count, -1);
162   - if (old_val == 1)
  230 + if (atomic_xchg(&lock->count, -1) == 1)
163 231 break;
164 232  
165 233 /*
166 234  
167 235  
... ... @@ -173,21 +241,22 @@
173 241 spin_unlock_mutex(&lock->wait_lock, flags);
174 242  
175 243 debug_mutex_free_waiter(&waiter);
  244 + preempt_enable();
176 245 return -EINTR;
177 246 }
178 247 __set_task_state(task, state);
179 248  
180 249 /* didnt get the lock, go to sleep: */
181 250 spin_unlock_mutex(&lock->wait_lock, flags);
182   - schedule();
  251 + __schedule();
183 252 spin_lock_mutex(&lock->wait_lock, flags);
184 253 }
185 254  
186 255 done:
187 256 lock_acquired(&lock->dep_map, ip);
188 257 /* got the lock - rejoice! */
189   - mutex_remove_waiter(lock, &waiter, task_thread_info(task));
190   - debug_mutex_set_owner(lock, task_thread_info(task));
  258 + mutex_remove_waiter(lock, &waiter, current_thread_info());
  259 + mutex_set_owner(lock);
191 260  
192 261 /* set it to 0 if there are no waiters left: */
193 262 if (likely(list_empty(&lock->wait_list)))
... ... @@ -196,6 +265,7 @@
196 265 spin_unlock_mutex(&lock->wait_lock, flags);
197 266  
198 267 debug_mutex_free_waiter(&waiter);
  268 + preempt_enable();
199 269  
200 270 return 0;
201 271 }
... ... @@ -222,7 +292,8 @@
222 292 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
223 293 {
224 294 might_sleep();
225   - return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
  295 + return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
  296 + subclass, _RET_IP_);
226 297 }
227 298  
228 299 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
... ... @@ -260,8 +331,6 @@
260 331 wake_up_process(waiter->task);
261 332 }
262 333  
263   - debug_mutex_clear_owner(lock);
264   -
265 334 spin_unlock_mutex(&lock->wait_lock, flags);
266 335 }
267 336  
... ... @@ -298,18 +367,30 @@
298 367 */
299 368 int __sched mutex_lock_interruptible(struct mutex *lock)
300 369 {
  370 + int ret;
  371 +
301 372 might_sleep();
302   - return __mutex_fastpath_lock_retval
  373 + ret = __mutex_fastpath_lock_retval
303 374 (&lock->count, __mutex_lock_interruptible_slowpath);
  375 + if (!ret)
  376 + mutex_set_owner(lock);
  377 +
  378 + return ret;
304 379 }
305 380  
306 381 EXPORT_SYMBOL(mutex_lock_interruptible);
307 382  
308 383 int __sched mutex_lock_killable(struct mutex *lock)
309 384 {
  385 + int ret;
  386 +
310 387 might_sleep();
311   - return __mutex_fastpath_lock_retval
  388 + ret = __mutex_fastpath_lock_retval
312 389 (&lock->count, __mutex_lock_killable_slowpath);
  390 + if (!ret)
  391 + mutex_set_owner(lock);
  392 +
  393 + return ret;
313 394 }
314 395 EXPORT_SYMBOL(mutex_lock_killable);
315 396  
316 397  
... ... @@ -352,9 +433,10 @@
352 433  
353 434 prev = atomic_xchg(&lock->count, -1);
354 435 if (likely(prev == 1)) {
355   - debug_mutex_set_owner(lock, current_thread_info());
  436 + mutex_set_owner(lock);
356 437 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
357 438 }
  439 +
358 440 /* Set it back to 0 if there are no waiters: */
359 441 if (likely(list_empty(&lock->wait_list)))
360 442 atomic_set(&lock->count, 0);
... ... @@ -380,8 +462,13 @@
380 462 */
381 463 int __sched mutex_trylock(struct mutex *lock)
382 464 {
383   - return __mutex_fastpath_trylock(&lock->count,
384   - __mutex_trylock_slowpath);
  465 + int ret;
  466 +
  467 + ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
  468 + if (ret)
  469 + mutex_set_owner(lock);
  470 +
  471 + return ret;
385 472 }
386 473  
387 474 EXPORT_SYMBOL(mutex_trylock);
kernel/mutex.h
... ... @@ -16,8 +16,26 @@
16 16 #define mutex_remove_waiter(lock, waiter, ti) \
17 17 __list_del((waiter)->list.prev, (waiter)->list.next)
18 18  
19   -#define debug_mutex_set_owner(lock, new_owner) do { } while (0)
20   -#define debug_mutex_clear_owner(lock) do { } while (0)
  19 +#ifdef CONFIG_SMP
  20 +static inline void mutex_set_owner(struct mutex *lock)
  21 +{
  22 + lock->owner = current_thread_info();
  23 +}
  24 +
  25 +static inline void mutex_clear_owner(struct mutex *lock)
  26 +{
  27 + lock->owner = NULL;
  28 +}
  29 +#else
  30 +static inline void mutex_set_owner(struct mutex *lock)
  31 +{
  32 +}
  33 +
  34 +static inline void mutex_clear_owner(struct mutex *lock)
  35 +{
  36 +}
  37 +#endif
  38 +
21 39 #define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
22 40 #define debug_mutex_free_waiter(waiter) do { } while (0)
23 41 #define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
kernel/sched.c
... ... @@ -4543,15 +4543,13 @@
4543 4543 /*
4544 4544 * schedule() is the main scheduler function.
4545 4545 */
4546   -asmlinkage void __sched schedule(void)
  4546 +asmlinkage void __sched __schedule(void)
4547 4547 {
4548 4548 struct task_struct *prev, *next;
4549 4549 unsigned long *switch_count;
4550 4550 struct rq *rq;
4551 4551 int cpu;
4552 4552  
4553   -need_resched:
4554   - preempt_disable();
4555 4553 cpu = smp_processor_id();
4556 4554 rq = cpu_rq(cpu);
4557 4555 rcu_qsctr_inc(cpu);
4558 4556  
4559 4557  
... ... @@ -4608,12 +4606,79 @@
4608 4606  
4609 4607 if (unlikely(reacquire_kernel_lock(current) < 0))
4610 4608 goto need_resched_nonpreemptible;
  4609 +}
4611 4610  
  4611 +asmlinkage void __sched schedule(void)
  4612 +{
  4613 +need_resched:
  4614 + preempt_disable();
  4615 + __schedule();
4612 4616 preempt_enable_no_resched();
4613 4617 if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
4614 4618 goto need_resched;
4615 4619 }
4616 4620 EXPORT_SYMBOL(schedule);
  4621 +
  4622 +#ifdef CONFIG_SMP
  4623 +/*
  4624 + * Look out! "owner" is an entirely speculative pointer
  4625 + * access and not reliable.
  4626 + */
  4627 +int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
  4628 +{
  4629 + unsigned int cpu;
  4630 + struct rq *rq;
  4631 +
  4632 + if (!sched_feat(OWNER_SPIN))
  4633 + return 0;
  4634 +
  4635 +#ifdef CONFIG_DEBUG_PAGEALLOC
  4636 + /*
  4637 + * Need to access the cpu field knowing that
  4638 + * DEBUG_PAGEALLOC could have unmapped it if
  4639 + * the mutex owner just released it and exited.
  4640 + */
  4641 + if (probe_kernel_address(&owner->cpu, cpu))
  4642 + goto out;
  4643 +#else
  4644 + cpu = owner->cpu;
  4645 +#endif
  4646 +
  4647 + /*
  4648 + * Even if the access succeeded (likely case),
  4649 + * the cpu field may no longer be valid.
  4650 + */
  4651 + if (cpu >= nr_cpumask_bits)
  4652 + goto out;
  4653 +
  4654 + /*
  4655 + * We need to validate that we can do a
  4656 + * get_cpu() and that we have the percpu area.
  4657 + */
  4658 + if (!cpu_online(cpu))
  4659 + goto out;
  4660 +
  4661 + rq = cpu_rq(cpu);
  4662 +
  4663 + for (;;) {
  4664 + /*
  4665 + * Owner changed, break to re-assess state.
  4666 + */
  4667 + if (lock->owner != owner)
  4668 + break;
  4669 +
  4670 + /*
  4671 + * Is that owner really running on that cpu?
  4672 + */
  4673 + if (task_thread_info(rq->curr) != owner || need_resched())
  4674 + return 0;
  4675 +
  4676 + cpu_relax();
  4677 + }
  4678 +out:
  4679 + return 1;
  4680 +}
  4681 +#endif
4617 4682  
4618 4683 #ifdef CONFIG_PREEMPT
4619 4684 /*
kernel/sched_features.h
... ... @@ -13,4 +13,5 @@
13 13 SCHED_FEAT(ASYM_EFF_LOAD, 1)
14 14 SCHED_FEAT(WAKEUP_OVERLAP, 0)
15 15 SCHED_FEAT(LAST_BUDDY, 1)
  16 +SCHED_FEAT(OWNER_SPIN, 1)
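OWNER_SPIN defaults to on; on a kernel built with CONFIG_SCHED_DEBUG it can be flipped at run time through the sched_features debugfs file (writing NO_OWNER_SPIN to /sys/kernel/debug/sched_features disables it), which gives an easy way to compare adaptive spinning against plain sleeping mutex_lock() behaviour.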
kernel/timer.c
... ... @@ -491,14 +491,18 @@
491 491 debug_object_free(timer, &timer_debug_descr);
492 492 }
493 493  
494   -static void __init_timer(struct timer_list *timer);
  494 +static void __init_timer(struct timer_list *timer,
  495 + const char *name,
  496 + struct lock_class_key *key);
495 497  
496   -void init_timer_on_stack(struct timer_list *timer)
  498 +void init_timer_on_stack_key(struct timer_list *timer,
  499 + const char *name,
  500 + struct lock_class_key *key)
497 501 {
498 502 debug_object_init_on_stack(timer, &timer_debug_descr);
499   - __init_timer(timer);
  503 + __init_timer(timer, name, key);
500 504 }
501   -EXPORT_SYMBOL_GPL(init_timer_on_stack);
  505 +EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
502 506  
503 507 void destroy_timer_on_stack(struct timer_list *timer)
504 508 {
... ... @@ -512,7 +516,9 @@
512 516 static inline void debug_timer_deactivate(struct timer_list *timer) { }
513 517 #endif
514 518  
515   -static void __init_timer(struct timer_list *timer)
  519 +static void __init_timer(struct timer_list *timer,
  520 + const char *name,
  521 + struct lock_class_key *key)
516 522 {
517 523 timer->entry.next = NULL;
518 524 timer->base = __raw_get_cpu_var(tvec_bases);
... ... @@ -521,6 +527,7 @@
521 527 timer->start_pid = -1;
522 528 memset(timer->start_comm, 0, TASK_COMM_LEN);
523 529 #endif
  530 + lockdep_init_map(&timer->lockdep_map, name, key, 0);
524 531 }
525 532  
526 533 /**
... ... @@ -530,19 +537,23 @@
530 537 * init_timer() must be done to a timer prior calling *any* of the
531 538 * other timer functions.
532 539 */
533   -void init_timer(struct timer_list *timer)
  540 +void init_timer_key(struct timer_list *timer,
  541 + const char *name,
  542 + struct lock_class_key *key)
534 543 {
535 544 debug_timer_init(timer);
536   - __init_timer(timer);
  545 + __init_timer(timer, name, key);
537 546 }
538   -EXPORT_SYMBOL(init_timer);
  547 +EXPORT_SYMBOL(init_timer_key);
539 548  
540   -void init_timer_deferrable(struct timer_list *timer)
  549 +void init_timer_deferrable_key(struct timer_list *timer,
  550 + const char *name,
  551 + struct lock_class_key *key)
541 552 {
542   - init_timer(timer);
  553 + init_timer_key(timer, name, key);
543 554 timer_set_deferrable(timer);
544 555 }
545   -EXPORT_SYMBOL(init_timer_deferrable);
  556 +EXPORT_SYMBOL(init_timer_deferrable_key);
546 557  
547 558 static inline void detach_timer(struct timer_list *timer,
548 559 int clear_pending)
... ... @@ -789,6 +800,15 @@
789 800 */
790 801 int del_timer_sync(struct timer_list *timer)
791 802 {
  803 +#ifdef CONFIG_LOCKDEP
  804 + unsigned long flags;
  805 +
  806 + local_irq_save(flags);
  807 + lock_map_acquire(&timer->lockdep_map);
  808 + lock_map_release(&timer->lockdep_map);
  809 + local_irq_restore(flags);
  810 +#endif
  811 +
792 812 for (;;) {
793 813 int ret = try_to_del_timer_sync(timer);
794 814 if (ret >= 0)
795 815  
796 816  
... ... @@ -861,10 +881,36 @@
861 881  
862 882 set_running_timer(base, timer);
863 883 detach_timer(timer, 1);
  884 +
864 885 spin_unlock_irq(&base->lock);
865 886 {
866 887 int preempt_count = preempt_count();
  888 +
  889 +#ifdef CONFIG_LOCKDEP
  890 + /*
  891 + * It is permissible to free the timer from
  892 + * inside the function that is called from
  893 + * it, this we need to take into account for
  894 + * lockdep too. To avoid bogus "held lock
  895 + * freed" warnings as well as problems when
  896 + * looking into timer->lockdep_map, make a
  897 + * copy and use that here.
  898 + */
  899 + struct lockdep_map lockdep_map =
  900 + timer->lockdep_map;
  901 +#endif
  902 + /*
  903 + * Couple the lock chain with the lock chain at
  904 + * del_timer_sync() by acquiring the lock_map
  905 + * around the fn() call here and in
  906 + * del_timer_sync().
  907 + */
  908 + lock_map_acquire(&lockdep_map);
  909 +
867 910 fn(data);
  911 +
  912 + lock_map_release(&lockdep_map);
  913 +
868 914 if (preempt_count != preempt_count()) {
869 915 printk(KERN_ERR "huh, entered %p "
870 916 "with preempt_count %08x, exited"
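The kind of report the timer lockdep_map buys is easiest to see with a small, invented example; demo_lock, demo_timer and demo_timer_fn are not part of the patch:

        #include <linux/spinlock.h>
        #include <linux/timer.h>

        static DEFINE_SPINLOCK(demo_lock);
        static struct timer_list demo_timer;

        static void demo_timer_fn(unsigned long data)
        {
                spin_lock(&demo_lock);          /* demo_lock taken inside the callback */
                /* ... */
                spin_unlock(&demo_lock);
        }

        static void demo_teardown(void)
        {
                spin_lock(&demo_lock);
                del_timer_sync(&demo_timer);    /* may wait for demo_timer_fn(), which  */
                spin_unlock(&demo_lock);        /* needs demo_lock                      */
        }

Because del_timer_sync() acquires and releases the timer's lockdep_map while demo_lock is held, and __run_timers() holds the (copied) map around the callback that takes demo_lock, the two lock chains couple and the potential deadlock is reported even if it never actually triggers.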
lib/Kconfig.debug
... ... @@ -402,7 +402,7 @@
402 402 bool
403 403 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
404 404 select STACKTRACE
405   - select FRAME_POINTER if !X86 && !MIPS && !PPC
  405 + select FRAME_POINTER if !MIPS && !PPC
406 406 select KALLSYMS
407 407 select KALLSYMS_ALL
408 408  
mm/page_alloc.c
... ... @@ -1479,6 +1479,8 @@
1479 1479 unsigned long did_some_progress;
1480 1480 unsigned long pages_reclaimed = 0;
1481 1481  
  1482 + lockdep_trace_alloc(gfp_mask);
  1483 +
1482 1484 might_sleep_if(wait);
1483 1485  
1484 1486 if (should_fail_alloc_page(gfp_mask, order))
1485 1487  
... ... @@ -1578,12 +1580,15 @@
1578 1580 */
1579 1581 cpuset_update_task_memory_state();
1580 1582 p->flags |= PF_MEMALLOC;
  1583 +
  1584 + lockdep_set_current_reclaim_state(gfp_mask);
1581 1585 reclaim_state.reclaimed_slab = 0;
1582 1586 p->reclaim_state = &reclaim_state;
1583 1587  
1584 1588 did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
1585 1589  
1586 1590 p->reclaim_state = NULL;
  1591 + lockdep_clear_current_reclaim_state();
1587 1592 p->flags &= ~PF_MEMALLOC;
1588 1593  
1589 1594 cond_resched();
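For code outside the page allocator that enters reclaim directly, the same bracketing pattern applies; a minimal sketch with an invented helper:

        lockdep_set_current_reclaim_state(GFP_KERNEL);
        shrink_my_private_caches();             /* made-up reclaim routine */
        lockdep_clear_current_reclaim_state();

mirroring the try_to_free_pages() call site above and the kswapd() one at the end of this patch.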
mm/slab.c
... ... @@ -3327,6 +3327,8 @@
3327 3327 unsigned long save_flags;
3328 3328 void *ptr;
3329 3329  
  3330 + lockdep_trace_alloc(flags);
  3331 +
3330 3332 if (slab_should_failslab(cachep, flags))
3331 3333 return NULL;
3332 3334  
... ... @@ -3402,6 +3404,8 @@
3402 3404 {
3403 3405 unsigned long save_flags;
3404 3406 void *objp;
  3407 +
  3408 + lockdep_trace_alloc(flags);
3405 3409  
3406 3410 if (slab_should_failslab(cachep, flags))
3407 3411 return NULL;
mm/slob.c
... ... @@ -466,6 +466,8 @@
466 466 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
467 467 void *ret;
468 468  
  469 + lockdep_trace_alloc(flags);
  470 +
469 471 if (size < PAGE_SIZE - align) {
470 472 if (!size)
471 473 return ZERO_SIZE_PTR;
mm/slub.c
... ... @@ -1597,6 +1597,7 @@
1597 1597 unsigned long flags;
1598 1598 unsigned int objsize;
1599 1599  
  1600 + lockdep_trace_alloc(gfpflags);
1600 1601 might_sleep_if(gfpflags & __GFP_WAIT);
1601 1602  
1602 1603 if (should_failslab(s->objsize, gfpflags))
mm/vmscan.c
... ... @@ -1965,6 +1965,8 @@
1965 1965 };
1966 1966 node_to_cpumask_ptr(cpumask, pgdat->node_id);
1967 1967  
  1968 + lockdep_set_current_reclaim_state(GFP_KERNEL);
  1969 +
1968 1970 if (!cpumask_empty(cpumask))
1969 1971 set_cpus_allowed_ptr(tsk, cpumask);
1970 1972 current->reclaim_state = &reclaim_state;