/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */
#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>

  void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
  			  struct lock_class_key *key)
8a25d5deb   Ingo Molnar   [PATCH] lockdep: ...
17
18
19
20
21
22
  {
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  	/*
  	 * Make sure we are not reinitializing a held lock:
  	 */
  	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
4dfbb9d8c   Peter Zijlstra   Lockdep: add lock...
23
  	lockdep_init_map(&lock->dep_map, name, key, 0);
8a25d5deb   Ingo Molnar   [PATCH] lockdep: ...
24
  #endif
edc35bd72   Thomas Gleixner   locking: Rename _...
25
  	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8a25d5deb   Ingo Molnar   [PATCH] lockdep: ...
26
27
28
29
  	lock->magic = SPINLOCK_MAGIC;
  	lock->owner = SPINLOCK_OWNER_INIT;
  	lock->owner_cpu = -1;
  }
c2f21ce2e   Thomas Gleixner   locking: Implemen...
30
  EXPORT_SYMBOL(__raw_spin_lock_init);
8a25d5deb   Ingo Molnar   [PATCH] lockdep: ...
31
32
33
34
35
36
37
38
39
  
  void __rwlock_init(rwlock_t *lock, const char *name,
  		   struct lock_class_key *key)
  {
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  	/*
  	 * Make sure we are not reinitializing a held lock:
  	 */
  	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
4dfbb9d8c   Peter Zijlstra   Lockdep: add lock...
40
  	lockdep_init_map(&lock->dep_map, name, key, 0);
8a25d5deb   Ingo Molnar   [PATCH] lockdep: ...
41
  #endif
fb3a6bbc9   Thomas Gleixner   locking: Convert ...
42
  	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
8a25d5deb   Ingo Molnar   [PATCH] lockdep: ...
43
44
45
46
47
48
  	lock->magic = RWLOCK_MAGIC;
  	lock->owner = SPINLOCK_OWNER_INIT;
  	lock->owner_cpu = -1;
  }
  
  EXPORT_SYMBOL(__rwlock_init);
4e101b0e6   Akinobu Mita   lib/spinlock_debu...
49
  static void spin_dump(raw_spinlock_t *lock, const char *msg)
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
50
  {
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
51
  	struct task_struct *owner = NULL;
9a11b49a8   Ingo Molnar   [PATCH] lockdep: ...
52
53
54
55
56
  	if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
  		owner = lock->owner;
  	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d
  ",
  		msg, raw_smp_processor_id(),
ba25f9dcc   Pavel Emelyanov   Use helpers to ob...
57
  		current->comm, task_pid_nr(current));
4b0681487   Stephen Boyd   spinlock_debug: p...
58
  	printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
9a11b49a8   Ingo Molnar   [PATCH] lockdep: ...
59
60
61
62
  			".owner_cpu: %d
  ",
  		lock, lock->magic,
  		owner ? owner->comm : "<none>",
ba25f9dcc   Pavel Emelyanov   Use helpers to ob...
63
  		owner ? task_pid_nr(owner) : -1,
9a11b49a8   Ingo Molnar   [PATCH] lockdep: ...
64
65
  		lock->owner_cpu);
  	dump_stack();
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
66
  }
4e101b0e6   Akinobu Mita   lib/spinlock_debu...
67
68
69
70
71
72
73
  static void spin_bug(raw_spinlock_t *lock, const char *msg)
  {
  	if (!debug_locks_off())
  		return;
  
  	spin_dump(lock, msg);
  }
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
74
  #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
9a11b49a8   Ingo Molnar   [PATCH] lockdep: ...
75
  static inline void
c2f21ce2e   Thomas Gleixner   locking: Implemen...
76
  debug_spin_lock_before(raw_spinlock_t *lock)
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
77
78
79
80
81
82
  {
  	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
  	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
  	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
  							lock, "cpu recursion");
  }
c2f21ce2e   Thomas Gleixner   locking: Implemen...
83
  static inline void debug_spin_lock_after(raw_spinlock_t *lock)
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
84
85
86
87
  {
  	lock->owner_cpu = raw_smp_processor_id();
  	lock->owner = current;
  }
c2f21ce2e   Thomas Gleixner   locking: Implemen...
88
  static inline void debug_spin_unlock(raw_spinlock_t *lock)
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
89
90
  {
  	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
c2f21ce2e   Thomas Gleixner   locking: Implemen...
91
  	SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
92
93
94
95
96
97
  	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
  	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
  							lock, "wrong CPU");
  	lock->owner = SPINLOCK_OWNER_INIT;
  	lock->owner_cpu = -1;
  }
c2f21ce2e   Thomas Gleixner   locking: Implemen...
98
  static void __spin_lock_debug(raw_spinlock_t *lock)
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
99
  {
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
100
  	u64 i;
c22f008ba   Chuck Ebbert   [PATCH] spinlock_...
101
  	u64 loops = loops_per_jiffy * HZ;
214f766ea   Vikram Mulukutla   lib/spinlock_debu...
102
103
104
105
106
107
108
109
  
  	for (i = 0; i < loops; i++) {
  		if (arch_spin_trylock(&lock->raw_lock))
  			return;
  		__delay(1);
  	}
  	/* lockup suspected: */
  	spin_dump(lock, "lockup suspected");
bb81a09e5   Andrew Morton   [PATCH] x86: all ...
110
  #ifdef CONFIG_SMP
214f766ea   Vikram Mulukutla   lib/spinlock_debu...
111
  	trigger_all_cpu_backtrace();
bb81a09e5   Andrew Morton   [PATCH] x86: all ...
112
  #endif
214f766ea   Vikram Mulukutla   lib/spinlock_debu...
113
114
115
116
117
118
119
120
121
122
  
  	/*
  	 * The trylock above was causing a livelock.  Give the lower level arch
  	 * specific lock code a chance to acquire the lock. We have already
  	 * printed a warning/backtrace at this point. The non-debug arch
  	 * specific code might actually succeed in acquiring the lock.  If it is
  	 * not successful, the end-result is the same - there is no forward
  	 * progress.
  	 */
  	arch_spin_lock(&lock->raw_lock);
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
123
  }
9828ea9d7   Thomas Gleixner   locking: Further ...
124
  void do_raw_spin_lock(raw_spinlock_t *lock)
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
125
126
  {
  	debug_spin_lock_before(lock);
0199c4e68   Thomas Gleixner   locking: Convert ...
127
  	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
128
129
130
  		__spin_lock_debug(lock);
  	debug_spin_lock_after(lock);
  }
9828ea9d7   Thomas Gleixner   locking: Further ...
131
  int do_raw_spin_trylock(raw_spinlock_t *lock)
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
132
  {
0199c4e68   Thomas Gleixner   locking: Convert ...
133
  	int ret = arch_spin_trylock(&lock->raw_lock);
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
134
135
136
137
138
139
140
141
142
143
144
  
  	if (ret)
  		debug_spin_lock_after(lock);
  #ifndef CONFIG_SMP
  	/*
  	 * Must not happen on UP:
  	 */
  	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
  #endif
  	return ret;
  }
9828ea9d7   Thomas Gleixner   locking: Further ...
145
  void do_raw_spin_unlock(raw_spinlock_t *lock)
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
146
147
  {
  	debug_spin_unlock(lock);
0199c4e68   Thomas Gleixner   locking: Convert ...
148
  	arch_spin_unlock(&lock->raw_lock);
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
149
150
151
152
  }
  
  static void rwlock_bug(rwlock_t *lock, const char *msg)
  {
9a11b49a8   Ingo Molnar   [PATCH] lockdep: ...
153
154
155
156
157
158
  	if (!debug_locks_off())
  		return;
  
  	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p
  ",
  		msg, raw_smp_processor_id(), current->comm,
ba25f9dcc   Pavel Emelyanov   Use helpers to ob...
159
  		task_pid_nr(current), lock);
9a11b49a8   Ingo Molnar   [PATCH] lockdep: ...
160
  	dump_stack();
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
161
162
163
  }
  
  #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
72f0b4e21   Andrew Morton   [PATCH] disable d...
164
  #if 0		/* __write_lock_debug() can lock up - maybe this can too? */
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
165
166
  static void __read_lock_debug(rwlock_t *lock)
  {
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
167
  	u64 i;
c22f008ba   Chuck Ebbert   [PATCH] spinlock_...
168
169
  	u64 loops = loops_per_jiffy * HZ;
  	int print_once = 1;
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
170
171
  
  	for (;;) {
c22f008ba   Chuck Ebbert   [PATCH] spinlock_...
172
  		for (i = 0; i < loops; i++) {
e5931943d   Thomas Gleixner   locking: Convert ...
173
  			if (arch_read_trylock(&lock->raw_lock))
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
174
  				return;
e0a602963   Ingo Molnar   [PATCH] Fix spinl...
175
  			__delay(1);
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
176
177
178
179
  		}
  		/* lockup suspected: */
  		if (print_once) {
  			print_once = 0;
51989b9ff   Dave Jones   [PATCH] printk le...
180
181
182
  			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
  					"%s/%d, %p
  ",
bb44f116a   Ingo Molnar   [PATCH] fix spinl...
183
184
  				raw_smp_processor_id(), current->comm,
  				current->pid, lock);
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
185
186
187
188
  			dump_stack();
  		}
  	}
  }
72f0b4e21   Andrew Morton   [PATCH] disable d...
189
  #endif
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
190

9828ea9d7   Thomas Gleixner   locking: Further ...
191
  void do_raw_read_lock(rwlock_t *lock)
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
192
193
  {
  	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
e5931943d   Thomas Gleixner   locking: Convert ...
194
  	arch_read_lock(&lock->raw_lock);
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
195
  }
9828ea9d7   Thomas Gleixner   locking: Further ...
196
  int do_raw_read_trylock(rwlock_t *lock)
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
197
  {
e5931943d   Thomas Gleixner   locking: Convert ...
198
  	int ret = arch_read_trylock(&lock->raw_lock);
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
199
200
201
202
203
204
205
206
207
  
  #ifndef CONFIG_SMP
  	/*
  	 * Must not happen on UP:
  	 */
  	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
  #endif
  	return ret;
  }
9828ea9d7   Thomas Gleixner   locking: Further ...
208
  void do_raw_read_unlock(rwlock_t *lock)
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
209
210
  {
  	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
e5931943d   Thomas Gleixner   locking: Convert ...
211
  	arch_read_unlock(&lock->raw_lock);
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
  }
  
/*
 * Sanity checks before taking the write lock: valid magic and no
 * (self-)recursive write acquisition by the current task or CPU.
 */
static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}
  
/* Record the new owner task and CPU once the write lock is held. */
static inline void debug_write_lock_after(rwlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}
  
/*
 * Sanity checks on write-lock release: valid magic, and the lock must
 * be write-held by the current task on this CPU.  Then reset the owner
 * fields to their "unlocked" poison values.
 */
static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
72f0b4e21   Andrew Morton   [PATCH] disable d...
237
  #if 0		/* This can cause lockups */
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
238
239
  static void __write_lock_debug(rwlock_t *lock)
  {
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
240
  	u64 i;
c22f008ba   Chuck Ebbert   [PATCH] spinlock_...
241
242
  	u64 loops = loops_per_jiffy * HZ;
  	int print_once = 1;
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
243
244
  
  	for (;;) {
c22f008ba   Chuck Ebbert   [PATCH] spinlock_...
245
  		for (i = 0; i < loops; i++) {
e5931943d   Thomas Gleixner   locking: Convert ...
246
  			if (arch_write_trylock(&lock->raw_lock))
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
247
  				return;
e0a602963   Ingo Molnar   [PATCH] Fix spinl...
248
  			__delay(1);
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
249
250
251
252
  		}
  		/* lockup suspected: */
  		if (print_once) {
  			print_once = 0;
51989b9ff   Dave Jones   [PATCH] printk le...
253
254
255
  			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
  					"%s/%d, %p
  ",
bb44f116a   Ingo Molnar   [PATCH] fix spinl...
256
257
  				raw_smp_processor_id(), current->comm,
  				current->pid, lock);
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
258
259
260
261
  			dump_stack();
  		}
  	}
  }
72f0b4e21   Andrew Morton   [PATCH] disable d...
262
  #endif
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
263

9828ea9d7   Thomas Gleixner   locking: Further ...
264
  void do_raw_write_lock(rwlock_t *lock)
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
265
266
  {
  	debug_write_lock_before(lock);
e5931943d   Thomas Gleixner   locking: Convert ...
267
  	arch_write_lock(&lock->raw_lock);
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
268
269
  	debug_write_lock_after(lock);
  }
9828ea9d7   Thomas Gleixner   locking: Further ...
270
  int do_raw_write_trylock(rwlock_t *lock)
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
271
  {
e5931943d   Thomas Gleixner   locking: Convert ...
272
  	int ret = arch_write_trylock(&lock->raw_lock);
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
273
274
275
276
277
278
279
280
281
282
283
  
  	if (ret)
  		debug_write_lock_after(lock);
  #ifndef CONFIG_SMP
  	/*
  	 * Must not happen on UP:
  	 */
  	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
  #endif
  	return ret;
  }
9828ea9d7   Thomas Gleixner   locking: Further ...
284
  void do_raw_write_unlock(rwlock_t *lock)
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
285
286
  {
  	debug_write_unlock(lock);
e5931943d   Thomas Gleixner   locking: Convert ...
287
  	arch_write_unlock(&lock->raw_lock);
fb1c8f93d   Ingo Molnar   [PATCH] spinlock ...
288
  }