Commit 443cd507ce7f78c6f8742b72736585c031d5a921

Authored by Huang, Ying
Committed by Ingo Molnar
1 parent 2429e4ee78

lockdep: add lock_class information to lock_chain and output it

This patch records the array of lock_class indices for each lock_chain, and
exports lock_chain information via /proc/lockdep_chains.

It is based on x86/master branch of git-x86 tree, and has been tested
on x86_64 platform.

Signed-off-by: Huang Ying <ying.huang@intel.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Showing 4 changed files with 135 additions and 3 deletions Side-by-side Diff

include/linux/lockdep.h
... ... @@ -182,6 +182,9 @@
182 182 * We record lock dependency chains, so that we can cache them:
183 183 */
/*
 * A cached lock-dependency chain.  The held-lock classes that make up the
 * chain are stored out-of-line in the chain_hlocks[] array; this struct
 * records where they live.
 */
struct lock_chain {
	u8		irq_context;	/* irq_context of the newest held lock in the chain */
	u8		depth;		/* number of lock classes in this chain */
	u16		base;		/* start index of this chain's classes in chain_hlocks[] */
	struct list_head entry;		/* linkage in the chainhash bucket (RCU list) */
	u64		chain_key;	/* hash key identifying this chain */
};
... ... @@ -1458,18 +1458,30 @@
1458 1458 }
1459 1459  
1460 1460 unsigned long nr_lock_chains;
1461   -static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
  1461 +struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
  1462 +atomic_t nr_chain_hlocks;
  1463 +static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1462 1464  
/*
 * Return the lock_class of the i-th held lock recorded in @chain.
 *
 * chain_hlocks[chain->base .. chain->base + chain->depth - 1] holds the
 * class indices (offsets into lock_classes[]) for this chain; @i must be
 * in [0, chain->depth).  NOTE(review): nothing here guards against a
 * chain whose base was never initialized (chain_hlocks[] overflow path
 * in lookup_chain_cache()) — verify callers only pass populated chains.
 */
struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
{
	return lock_classes + chain_hlocks[chain->base + i];
}
  1469 +
1463 1470 /*
1464 1471 * Look up a dependency chain. If the key is not present yet then
1465 1472 * add it and return 1 - in this case the new dependency chain is
1466 1473 * validated. If the key is already hashed, return 0.
1467 1474 * (On return with 1 graph_lock is held.)
1468 1475 */
1469   -static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
  1476 +static inline int lookup_chain_cache(struct task_struct *curr,
  1477 + struct held_lock *hlock,
  1478 + u64 chain_key)
1470 1479 {
  1480 + struct lock_class *class = hlock->class;
1471 1481 struct list_head *hash_head = chainhashentry(chain_key);
1472 1482 struct lock_chain *chain;
  1483 + struct held_lock *hlock_curr, *hlock_next;
  1484 + int i, j, n;
1473 1485  
1474 1486 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1475 1487 return 0;
... ... @@ -1517,6 +1529,26 @@
1517 1529 }
1518 1530 chain = lock_chains + nr_lock_chains++;
1519 1531 chain->chain_key = chain_key;
  1532 + chain->irq_context = hlock->irq_context;
  1533 + /* Find the first held_lock of current chain */
  1534 + hlock_next = hlock;
  1535 + for (i = curr->lockdep_depth - 1; i >= 0; i--) {
  1536 + hlock_curr = curr->held_locks + i;
  1537 + if (hlock_curr->irq_context != hlock_next->irq_context)
  1538 + break;
  1539 + hlock_next = hlock;
  1540 + }
  1541 + i++;
  1542 + chain->depth = curr->lockdep_depth + 1 - i;
  1543 + n = atomic_add_return(chain->depth, &nr_chain_hlocks);
  1544 + if (unlikely(n < MAX_LOCKDEP_CHAIN_HLOCKS)) {
  1545 + chain->base = n - chain->depth;
  1546 + for (j = 0; j < chain->depth - 1; j++, i++) {
  1547 + int lock_id = curr->held_locks[i].class - lock_classes;
  1548 + chain_hlocks[chain->base + j] = lock_id;
  1549 + }
  1550 + chain_hlocks[chain->base + j] = class - lock_classes;
  1551 + }
1520 1552 list_add_tail_rcu(&chain->entry, hash_head);
1521 1553 debug_atomic_inc(&chain_lookup_misses);
1522 1554 inc_chains();
... ... @@ -1538,7 +1570,7 @@
1538 1570 * graph_lock for us)
1539 1571 */
1540 1572 if (!hlock->trylock && (hlock->check == 2) &&
1541   - lookup_chain_cache(chain_key, hlock->class)) {
  1573 + lookup_chain_cache(curr, hlock, chain_key)) {
1542 1574 /*
1543 1575 * Check whether last held lock:
1544 1576 *
kernel/lockdep_internals.h
... ... @@ -23,6 +23,8 @@
23 23 #define MAX_LOCKDEP_CHAINS_BITS 14
24 24 #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)
25 25  
  26 +#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
  27 +
26 28 /*
27 29 * Stack-trace: tightly packed array of stack backtrace
28 30 * addresses. Protected by the hash_lock.
29 31  
30 32  
... ... @@ -30,15 +32,19 @@
30 32 #define MAX_STACK_TRACE_ENTRIES 262144UL
31 33  
32 34 extern struct list_head all_lock_classes;
  35 +extern struct lock_chain lock_chains[];
33 36  
34 37 extern void
35 38 get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4);
36 39  
37 40 extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);
38 41  
  42 +struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);
  43 +
39 44 extern unsigned long nr_lock_classes;
40 45 extern unsigned long nr_list_entries;
41 46 extern unsigned long nr_lock_chains;
  47 +extern atomic_t nr_chain_hlocks;
42 48 extern unsigned long nr_stack_trace_entries;
43 49  
44 50 extern unsigned int nr_hardirq_chains;
kernel/lockdep_proc.c
... ... @@ -178,6 +178,93 @@
178 178 .release = seq_release,
179 179 };
180 180  
  181 +static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
  182 +{
  183 + struct lock_chain *chain;
  184 +
  185 + (*pos)++;
  186 +
  187 + if (v == SEQ_START_TOKEN)
  188 + chain = m->private;
  189 + else {
  190 + chain = v;
  191 +
  192 + if (*pos < nr_lock_chains)
  193 + chain = lock_chains + *pos;
  194 + else
  195 + chain = NULL;
  196 + }
  197 +
  198 + return chain;
  199 +}
  200 +
  201 +static void *lc_start(struct seq_file *m, loff_t *pos)
  202 +{
  203 + if (*pos == 0)
  204 + return SEQ_START_TOKEN;
  205 +
  206 + if (*pos < nr_lock_chains)
  207 + return lock_chains + *pos;
  208 +
  209 + return NULL;
  210 +}
  211 +
  212 +static void lc_stop(struct seq_file *m, void *v)
  213 +{
  214 +}
  215 +
  216 +static int lc_show(struct seq_file *m, void *v)
  217 +{
  218 + struct lock_chain *chain = v;
  219 + struct lock_class *class;
  220 + int i;
  221 +
  222 + if (v == SEQ_START_TOKEN) {
  223 + seq_printf(m, "all lock chains:\n");
  224 + return 0;
  225 + }
  226 +
  227 + seq_printf(m, "irq_context: %d\n", chain->irq_context);
  228 +
  229 + for (i = 0; i < chain->depth; i++) {
  230 + class = lock_chain_get_class(chain, i);
  231 + seq_printf(m, "[%p] ", class->key);
  232 + print_name(m, class);
  233 + seq_puts(m, "\n");
  234 + }
  235 + seq_puts(m, "\n");
  236 +
  237 + return 0;
  238 +}
  239 +
  240 +static const struct seq_operations lockdep_chains_ops = {
  241 + .start = lc_start,
  242 + .next = lc_next,
  243 + .stop = lc_stop,
  244 + .show = lc_show,
  245 +};
  246 +
  247 +static int lockdep_chains_open(struct inode *inode, struct file *file)
  248 +{
  249 + int res = seq_open(file, &lockdep_chains_ops);
  250 + if (!res) {
  251 + struct seq_file *m = file->private_data;
  252 +
  253 + if (nr_lock_chains)
  254 + m->private = lock_chains;
  255 + else
  256 + m->private = NULL;
  257 + }
  258 + return res;
  259 +}
  260 +
  261 +static const struct file_operations proc_lockdep_chains_operations = {
  262 + .open = lockdep_chains_open,
  263 + .read = seq_read,
  264 + .llseek = seq_lseek,
  265 + .release = seq_release,
  266 +};
  267 +
181 268 static void lockdep_stats_debug_show(struct seq_file *m)
182 269 {
183 270 #ifdef CONFIG_DEBUG_LOCKDEP
... ... @@ -294,6 +381,8 @@
294 381 #ifdef CONFIG_PROVE_LOCKING
295 382 seq_printf(m, " dependency chains: %11lu [max: %lu]\n",
296 383 nr_lock_chains, MAX_LOCKDEP_CHAINS);
  384 + seq_printf(m, " dependency chain hlocks: %11d [max: %lu]\n",
  385 + atomic_read(&nr_chain_hlocks), MAX_LOCKDEP_CHAIN_HLOCKS);
297 386 #endif
298 387  
299 388 #ifdef CONFIG_TRACE_IRQFLAGS
... ... @@ -661,6 +750,8 @@
661 750 static int __init lockdep_proc_init(void)
662 751 {
663 752 proc_create("lockdep", S_IRUSR, NULL, &proc_lockdep_operations);
  753 + proc_create("lockdep_chains", S_IRUSR, NULL,
  754 + &proc_lockdep_chains_operations);
664 755 proc_create("lockdep_stats", S_IRUSR, NULL,
665 756 &proc_lockdep_stats_operations);
666 757