lib/spinlock_debug.c
  /*
   * Copyright 2005, Red Hat, Inc., Ingo Molnar
   * Released under the General Public License (GPL).
   *
   * This file contains the spinlock/rwlock implementations for
   * DEBUG_SPINLOCK.
   */
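
/*
 * Note: with CONFIG_DEBUG_SPINLOCK these out-of-line _raw_* functions are
 * used in place of the plain architecture lock primitives; they sanity-check
 * the lock's magic value and, for spinlocks and write locks, the
 * owner/owner_cpu debug fields around each acquire and release.
 */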
  #include <linux/spinlock.h>
  #include <linux/nmi.h>
  #include <linux/interrupt.h>
  #include <linux/debug_locks.h>
  #include <linux/delay.h>
  #include <linux/module.h>

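/*
 * Lock initializers: set up the architecture raw lock plus the debug fields.
 * An owner of SPINLOCK_OWNER_INIT and an owner_cpu of -1 mean "not held".
 */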
  void __spin_lock_init(spinlock_t *lock, const char *name,
  		      struct lock_class_key *key)
  {
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  	/*
  	 * Make sure we are not reinitializing a held lock:
  	 */
  	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
  	lockdep_init_map(&lock->dep_map, name, key, 0);
  #endif
  	lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
  	lock->magic = SPINLOCK_MAGIC;
  	lock->owner = SPINLOCK_OWNER_INIT;
  	lock->owner_cpu = -1;
  }
  
  EXPORT_SYMBOL(__spin_lock_init);
  
  void __rwlock_init(rwlock_t *lock, const char *name,
  		   struct lock_class_key *key)
  {
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  	/*
  	 * Make sure we are not reinitializing a held lock:
  	 */
  	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
  	lockdep_init_map(&lock->dep_map, name, key, 0);
  #endif
  	lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
  	lock->magic = RWLOCK_MAGIC;
  	lock->owner = SPINLOCK_OWNER_INIT;
  	lock->owner_cpu = -1;
  }
  
  EXPORT_SYMBOL(__rwlock_init);
  static void spin_bug(spinlock_t *lock, const char *msg)
  {
  	struct task_struct *owner = NULL;
  	if (!debug_locks_off())
  		return;
  
  	if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
  		owner = lock->owner;
	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
  		msg, raw_smp_processor_id(),
  		current->comm, task_pid_nr(current));
	printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
			".owner_cpu: %d\n",
  		lock, lock->magic,
  		owner ? owner->comm : "<none>",
  		owner ? task_pid_nr(owner) : -1,
  		lock->owner_cpu);
  	dump_stack();
  }
  
  #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
  static inline void
  debug_spin_lock_before(spinlock_t *lock)
  {
  	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
  	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
  	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
  							lock, "cpu recursion");
  }
  
  static inline void debug_spin_lock_after(spinlock_t *lock)
  {
  	lock->owner_cpu = raw_smp_processor_id();
  	lock->owner = current;
  }
  
  static inline void debug_spin_unlock(spinlock_t *lock)
  {
  	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
  	SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
  	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
  	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
  							lock, "wrong CPU");
  	lock->owner = SPINLOCK_OWNER_INIT;
  	lock->owner_cpu = -1;
  }
  
  static void __spin_lock_debug(spinlock_t *lock)
  {
  	u64 i;
  	u64 loops = loops_per_jiffy * HZ;
  	int print_once = 1;
  
  	for (;;) {
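		/*
		 * Poll the lock with trylock; each pass of the inner loop
		 * spins for roughly one second (loops_per_jiffy * HZ calls
		 * to __delay(1)) before the lockup warning below fires once.
		 */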
  		for (i = 0; i < loops; i++) {
  			if (__raw_spin_trylock(&lock->raw_lock))
  				return;
  			__delay(1);
  		}
  		/* lockup suspected: */
  		if (print_once) {
  			print_once = 0;
			printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
					"%s/%d, %p\n",
  				raw_smp_processor_id(), current->comm,
  				task_pid_nr(current), lock);
  			dump_stack();
  #ifdef CONFIG_SMP
  			trigger_all_cpu_backtrace();
  #endif
  		}
  	}
  }
  
  void _raw_spin_lock(spinlock_t *lock)
  {
  	debug_spin_lock_before(lock);
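	/* Try the fast path first; fall back to the debug spin loop on contention. */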
  	if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
  		__spin_lock_debug(lock);
  	debug_spin_lock_after(lock);
  }
  
  int _raw_spin_trylock(spinlock_t *lock)
  {
  	int ret = __raw_spin_trylock(&lock->raw_lock);
  
  	if (ret)
  		debug_spin_lock_after(lock);
  #ifndef CONFIG_SMP
  	/*
  	 * Must not happen on UP:
  	 */
  	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
  #endif
  	return ret;
  }
  
  void _raw_spin_unlock(spinlock_t *lock)
  {
  	debug_spin_unlock(lock);
  	__raw_spin_unlock(&lock->raw_lock);
  }
  
  static void rwlock_bug(rwlock_t *lock, const char *msg)
  {
  	if (!debug_locks_off())
  		return;
  
	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
  		msg, raw_smp_processor_id(), current->comm,
  		task_pid_nr(current), lock);
  	dump_stack();
  }
  
  #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
  #if 0		/* __write_lock_debug() can lock up - maybe this can too? */
  static void __read_lock_debug(rwlock_t *lock)
  {
  	u64 i;
  	u64 loops = loops_per_jiffy * HZ;
  	int print_once = 1;
  
  	for (;;) {
  		for (i = 0; i < loops; i++) {
  			if (__raw_read_trylock(&lock->raw_lock))
  				return;
  			__delay(1);
  		}
  		/* lockup suspected: */
  		if (print_once) {
  			print_once = 0;
			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
  				raw_smp_processor_id(), current->comm,
  				current->pid, lock);
  			dump_stack();
  		}
  	}
  }
  #endif
  
  void _raw_read_lock(rwlock_t *lock)
  {
  	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
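	/* Read-side lockup detection (__read_lock_debug above) is compiled out. */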
  	__raw_read_lock(&lock->raw_lock);
  }
  
  int _raw_read_trylock(rwlock_t *lock)
  {
  	int ret = __raw_read_trylock(&lock->raw_lock);
  
  #ifndef CONFIG_SMP
  	/*
  	 * Must not happen on UP:
  	 */
  	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
  #endif
  	return ret;
  }
  
  void _raw_read_unlock(rwlock_t *lock)
  {
  	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
  	__raw_read_unlock(&lock->raw_lock);
  }
  
  static inline void debug_write_lock_before(rwlock_t *lock)
  {
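	/*
	 * Only the write side tracks an owner: several readers may hold the
	 * lock on different CPUs at once, so the read-lock paths never set
	 * owner/owner_cpu.
	 */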
  	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
  	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
  	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
  							lock, "cpu recursion");
  }
  
  static inline void debug_write_lock_after(rwlock_t *lock)
  {
  	lock->owner_cpu = raw_smp_processor_id();
  	lock->owner = current;
  }
  
  static inline void debug_write_unlock(rwlock_t *lock)
  {
  	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
  	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
  	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
  							lock, "wrong CPU");
  	lock->owner = SPINLOCK_OWNER_INIT;
  	lock->owner_cpu = -1;
  }
  #if 0		/* This can cause lockups */
  static void __write_lock_debug(rwlock_t *lock)
  {
  	u64 i;
  	u64 loops = loops_per_jiffy * HZ;
  	int print_once = 1;
  
  	for (;;) {
  		for (i = 0; i < loops; i++) {
  			if (__raw_write_trylock(&lock->raw_lock))
  				return;
  			__delay(1);
  		}
  		/* lockup suspected: */
  		if (print_once) {
  			print_once = 0;
			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
  				raw_smp_processor_id(), current->comm,
  				current->pid, lock);
  			dump_stack();
  		}
  	}
  }
  #endif
  
  void _raw_write_lock(rwlock_t *lock)
  {
  	debug_write_lock_before(lock);
  	__raw_write_lock(&lock->raw_lock);
  	debug_write_lock_after(lock);
  }
  
  int _raw_write_trylock(rwlock_t *lock)
  {
  	int ret = __raw_write_trylock(&lock->raw_lock);
  
  	if (ret)
  		debug_write_lock_after(lock);
  #ifndef CONFIG_SMP
  	/*
  	 * Must not happen on UP:
  	 */
  	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
  #endif
  	return ret;
  }
  
  void _raw_write_unlock(rwlock_t *lock)
  {
  	debug_write_unlock(lock);
  	__raw_write_unlock(&lock->raw_lock);
  }