Commit 673d62cc5ea6fca046650f17f77985b112c62322

Authored by Vegard Nossum
Committed by Thomas Gleixner
1 parent bef69ea0dc

debugobjects: fix lockdep warning

Daniel J. Blueman reported:
> =======================================================
> [ INFO: possible circular locking dependency detected ]
> 2.6.27-rc4-224c #1
> -------------------------------------------------------
> hald/4680 is trying to acquire lock:
>  (&n->list_lock){++..}, at: [<ffffffff802bfa26>] add_partial+0x26/0x80
>
> but task is already holding lock:
>  (&obj_hash[i].lock){++..}, at: [<ffffffff8041cfdc>]
> debug_object_free+0x5c/0x120

We fix it by moving the actual freeing outside the lock (the lock now
only protects the list).

The pool lock is also promoted to irq-safe (suggested by Dan). This is
necessary because free_object() is now called outside the irq-disabled
region, so we need to protect against an interrupt handler which calls
debug_object_init().
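
Concretely (a simplified sketch of the two paths that now meet on
pool_lock; only the locking is shown):

/* free_object() can now run with interrupts enabled: */
static void free_object(struct debug_obj *obj)
{
        unsigned long flags;

        spin_lock_irqsave(&pool_lock, flags);   /* was spin_lock() */
        /* ... put obj back into obj_pool, or hand it to
         * kmem_cache_free() ... */
        spin_unlock_irqrestore(&pool_lock, flags);
}

/* Meanwhile debug_object_init() may be called from an interrupt handler
 * and end up in alloc_object(), which also takes pool_lock with
 * interrupts disabled.  A plain spin_lock() in free_object() could
 * therefore deadlock against an interrupt on the same CPU. */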

[tglx@linutronix.de: added hlist_move_list helper to avoid looping
		     through the list twice]
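
For reference, a quick usage sketch of the helper (variable names made up
here): it transplants the whole chain in O(1) instead of walking it.

HLIST_HEAD(src);
HLIST_HEAD(dst);

/* ... entries are on "src" ... */

hlist_move_list(&src, &dst);
/* Afterwards dst.first is the former src.first (with its pprev fixed up
 * to point at &dst.first) and src is empty. */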

Reported-by: Daniel J Blueman <daniel.blueman@gmail.com>
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Showing 2 changed files with 36 additions and 8 deletions

include/linux/list.h
@@ -619,6 +619,19 @@
         next->next->pprev = &next->next;
 }
 
+/*
+ * Move a list from one list head to another. Fixup the pprev
+ * reference of the first entry if it exists.
+ */
+static inline void hlist_move_list(struct hlist_head *old,
+                                   struct hlist_head *new)
+{
+        new->first = old->first;
+        if (new->first)
+                new->first->pprev = &new->first;
+        old->first = NULL;
+}
+
 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
 
 #define hlist_for_each(pos, head) \

lib/debugobjects.c
@@ -112,6 +112,7 @@
 
 /*
  * Allocate a new object. If the pool is empty, switch off the debugger.
+ * Must be called with interrupts disabled.
  */
 static struct debug_obj *
 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
@@ -148,17 +149,18 @@
 static void free_object(struct debug_obj *obj)
 {
         unsigned long idx = (unsigned long)(obj - obj_static_pool);
+        unsigned long flags;
 
         if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
-                spin_lock(&pool_lock);
+                spin_lock_irqsave(&pool_lock, flags);
                 hlist_add_head(&obj->node, &obj_pool);
                 obj_pool_free++;
                 obj_pool_used--;
-                spin_unlock(&pool_lock);
+                spin_unlock_irqrestore(&pool_lock, flags);
         } else {
-                spin_lock(&pool_lock);
+                spin_lock_irqsave(&pool_lock, flags);
                 obj_pool_used--;
-                spin_unlock(&pool_lock);
+                spin_unlock_irqrestore(&pool_lock, flags);
                 kmem_cache_free(obj_cache, obj);
         }
 }
@@ -171,6 +173,7 @@
 {
         struct debug_bucket *db = obj_hash;
         struct hlist_node *node, *tmp;
+        HLIST_HEAD(freelist);
         struct debug_obj *obj;
         unsigned long flags;
         int i;
@@ -179,11 +182,14 @@
 
         for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                 spin_lock_irqsave(&db->lock, flags);
-                hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+                hlist_move_list(&db->list, &freelist);
+                spin_unlock_irqrestore(&db->lock, flags);
+
+                /* Now free them */
+                hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
                         hlist_del(&obj->node);
                         free_object(obj);
                 }
-                spin_unlock_irqrestore(&db->lock, flags);
         }
 }
 
@@ -498,8 +504,9 @@
                 return;
         default:
                 hlist_del(&obj->node);
+                spin_unlock_irqrestore(&db->lock, flags);
                 free_object(obj);
-                break;
+                return;
         }
 out_unlock:
         spin_unlock_irqrestore(&db->lock, flags);
@@ -510,6 +517,7 @@
 {
         unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
         struct hlist_node *node, *tmp;
+        HLIST_HEAD(freelist);
         struct debug_obj_descr *descr;
         enum debug_obj_state state;
         struct debug_bucket *db;
@@ -545,11 +553,18 @@
                         goto repeat;
                 default:
                         hlist_del(&obj->node);
-                        free_object(obj);
+                        hlist_add_head(&obj->node, &freelist);
                         break;
                 }
         }
         spin_unlock_irqrestore(&db->lock, flags);
+
+        /* Now free them */
+        hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+                hlist_del(&obj->node);
+                free_object(obj);
+        }
+
         if (cnt > debug_objects_maxchain)
                 debug_objects_maxchain = cnt;
 }