include/linux/lockdep.h

  /*
   * Runtime locking correctness validator
   *
   *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
   *
   * see Documentation/lockdep-design.txt for more details.
   */
  #ifndef __LINUX_LOCKDEP_H
  #define __LINUX_LOCKDEP_H
  struct task_struct;
  struct lockdep_map;

  /* for sysctl */
  extern int prove_locking;
  extern int lock_stat;
  #ifdef CONFIG_LOCKDEP
  #include <linux/linkage.h>
  #include <linux/list.h>
  #include <linux/debug_locks.h>
  #include <linux/stacktrace.h>
  /*
   * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
   * the total number of states... :-(
   */
  #define XXX_LOCK_USAGE_STATES		(1+3*4)
  
  #define MAX_LOCKDEP_SUBCLASSES		8UL
  
  /*
   * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
   * cached in the instance of lockdep_map
   *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires this highly contended lock at single
 * depth.
   */
  #define NR_LOCKDEP_CACHING_CLASSES	2
  
  /*
   * Lock-classes are keyed via unique addresses, by embedding the
   * lockclass-key into the kernel (or module) .data section. (For
   * static locks we use the lock address itself as the key.)
   */
  struct lockdep_subclass_key {
  	char __one_byte;
  } __attribute__ ((__packed__));
  
  struct lock_class_key {
  	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
  };
  extern struct lock_class_key __lockdep_no_validate__;
  #define LOCKSTAT_POINTS		4
  /*
   * The lock-class itself:
   */
  struct lock_class {
  	/*
  	 * class-hash:
  	 */
  	struct list_head		hash_entry;
  
  	/*
  	 * global list of all lock-classes:
  	 */
  	struct list_head		lock_entry;
  
  	struct lockdep_subclass_key	*key;
  	unsigned int			subclass;
  	unsigned int			dep_gen_id;
  
  	/*
  	 * IRQ/softirq usage tracking bits:
  	 */
  	unsigned long			usage_mask;
  	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];
  
  	/*
  	 * These fields represent a directed graph of lock dependencies,
  	 * to every node we attach a list of "forward" and a list of
  	 * "backward" graph nodes.
  	 */
  	struct list_head		locks_after, locks_before;
  
  	/*
  	 * Generation counter, when doing certain classes of graph walking,
  	 * to ensure that we check one node only once:
  	 */
  	unsigned int			version;
  
  	/*
  	 * Statistics counter:
  	 */
  	unsigned long			ops;
  
  	const char			*name;
  	int				name_version;
  
  #ifdef CONFIG_LOCK_STAT
  	unsigned long			contention_point[LOCKSTAT_POINTS];
  	unsigned long			contending_point[LOCKSTAT_POINTS];
  #endif
  };
  
  #ifdef CONFIG_LOCK_STAT
  struct lock_time {
  	s64				min;
  	s64				max;
  	s64				total;
  	unsigned long			nr;
  };
  enum bounce_type {
  	bounce_acquired_write,
  	bounce_acquired_read,
  	bounce_contended_write,
  	bounce_contended_read,
  	nr_bounce_types,
  
  	bounce_acquired = bounce_acquired_write,
  	bounce_contended = bounce_contended_write,
  };
  struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
  	struct lock_time		read_waittime;
  	struct lock_time		write_waittime;
  	struct lock_time		read_holdtime;
  	struct lock_time		write_holdtime;
  	unsigned long			bounces[nr_bounce_types];
  };
  
  struct lock_class_stats lock_stats(struct lock_class *class);
  void clear_lock_stats(struct lock_class *class);
  #endif
  /*
   * Map the lock object (the lock instance) to the lock-class object.
   * This is embedded into specific lock instances:
   */
  struct lockdep_map {
  	struct lock_class_key		*key;
  	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
  	const char			*name;
  #ifdef CONFIG_LOCK_STAT
  	int				cpu;
  	unsigned long			ip;
  #endif
  };
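
/*
 * For example (an illustrative sketch, not a definition from this
 * header), a locking primitive embeds the map next to its real
 * lock state:
 *
 *	struct my_lock {
 *		arch_spinlock_t		raw_lock;
 *	#ifdef CONFIG_DEBUG_LOCK_ALLOC
 *		struct lockdep_map	dep_map;
 *	#endif
 *	};
 */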
  
  /*
   * Every lock has a list of other locks that were taken after it.
   * We only grow the list, never remove from it:
   */
  struct lock_list {
  	struct list_head		entry;
  	struct lock_class		*class;
  	struct stack_trace		trace;
  	int				distance;

  	/*
  	 * The parent field is used to implement breadth-first search, and the
  	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
  	 */
  	struct lock_list		*parent;
  };
  
  /*
   * We record lock dependency chains, so that we can cache them:
   */
  struct lock_chain {
  	u8				irq_context;
  	u8				depth;
  	u16				base;
  	struct list_head		entry;
  	u64				chain_key;
  };
  #define MAX_LOCKDEP_KEYS_BITS		13
  /*
   * Subtract one because we offset hlock->class_idx by 1 in order
   * to make 0 mean no class. This avoids overflowing the class_idx
   * bitfield and hitting the BUG in hlock_class().
   */
  #define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

  struct held_lock {
  	/*
  	 * One-way hash of the dependency chain up to this point. We
  	 * hash the hashes step by step as the dependency chain grows.
  	 *
  	 * We use it for dependency-caching and we skip detection
  	 * passes and dependency-updates if there is a cache-hit, so
  	 * it is absolutely critical for 100% coverage of the validator
  	 * to have a unique key value for every unique dependency path
  	 * that can occur in the system, to make a unique hash value
  	 * as likely as possible - hence the 64-bit width.
  	 *
  	 * The task struct holds the current hash value (initialized
  	 * with zero), here we store the previous hash value:
  	 */
  	u64				prev_chain_key;
  	unsigned long			acquire_ip;
  	struct lockdep_map		*instance;
  	struct lockdep_map		*nest_lock;
  #ifdef CONFIG_LOCK_STAT
  	u64 				waittime_stamp;
  	u64				holdtime_stamp;
  #endif
  	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
  	/*
  	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
  	 * dependencies - the lock usage graph walking covers that area
  	 * anyway, and we'd just unnecessarily increase the number of
  	 * dependencies otherwise. [Note: hardirq and softirq contexts
  	 * are separated from each other too.]
  	 *
  	 * The following field is used to detect when we cross into an
  	 * interrupt context:
  	 */
  	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
  	unsigned int trylock:1;						/* 16 bits */
  	unsigned int read:2;        /* see lock_acquire() comment */
  	unsigned int check:2;       /* see lock_acquire() comment */
  	unsigned int hardirqs_off:1;
  	unsigned int references:11;					/* 32 bits */
  };
  
  /*
   * Initialization, self-test and debugging-output methods:
   */
  extern void lockdep_init(void);
  extern void lockdep_info(void);
  extern void lockdep_reset(void);
  extern void lockdep_reset_lock(struct lockdep_map *lock);
  extern void lockdep_free_key_range(void *start, unsigned long size);
  extern void lockdep_sys_exit(void);
  
  extern void lockdep_off(void);
  extern void lockdep_on(void);
  
  /*
   * These methods are used by specific locking variants (spinlocks,
   * rwlocks, mutexes and rwsems) to pass init/acquire/release events
   * to lockdep:
   */
  
  extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
  			     struct lock_class_key *key, int subclass);
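
/*
 * A lock-type's init function typically registers the class like this
 * (sketch; my_lock and my_lock_key are illustrative, not part of this
 * header):
 *
 *	static struct lock_class_key my_lock_key;
 *
 *	void my_lock_init(struct my_lock *lock)
 *	{
 *		lockdep_init_map(&lock->dep_map, "my_lock", &my_lock_key, 0);
 *	}
 */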
  
  /*
   * To initialize a lockdep_map statically use this macro.
   * Note that _name must not be NULL.
   */
  #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
  	{ .name = (_name), .key = (void *)(_key), }
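
/*
 * Typical static use (illustrative names):
 *
 *	static struct lock_class_key my_map_key;
 *	static struct lockdep_map my_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_map", &my_map_key);
 */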
  
  /*
   * Reinitialize a lock key - for cases where there is special locking or
   * special initialization of locks so that the validator gets the scope
   * of dependencies wrong: they are either too broad (they need a class-split)
   * or they are too narrow (they suffer from a false class-split):
   */
  #define lockdep_set_class(lock, key) \
  		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
  #define lockdep_set_class_and_name(lock, key, name) \
  		lockdep_init_map(&(lock)->dep_map, name, key, 0)
  #define lockdep_set_class_and_subclass(lock, key, sub) \
  		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
  #define lockdep_set_subclass(lock, sub)	\
  		lockdep_init_map(&(lock)->dep_map, #lock, \
  				 (lock)->dep_map.key, sub)
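
/*
 * A class-split sketch (names are illustrative): give one instance its
 * own key so that nesting it inside an instance of the default class
 * is not reported as recursive locking:
 *
 *	static struct lock_class_key nested_lock_key;
 *
 *	spin_lock_init(&child->lock);
 *	lockdep_set_class(&child->lock, &nested_lock_key);
 */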
  
  #define lockdep_set_novalidate_class(lock) \
  	lockdep_set_class(lock, &__lockdep_no_validate__)
  /*
   * Compare locking classes
   */
  #define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)
  
  static inline int lockdep_match_key(struct lockdep_map *lock,
  				    struct lock_class_key *key)
  {
  	return lock->key == key;
  }
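
/*
 * E.g. (illustrative) verify that a lock was set up with the key the
 * caller expects:
 *
 *	WARN_ON(!lockdep_match_class(&dev->lock, &my_expected_key));
 */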
  
  /*
   * Acquire a lock.
   *
   * Values for "read":
   *
   *   0: exclusive (write) acquire
   *   1: read-acquire (no recursion allowed)
   *   2: read-acquire with same-instance recursion allowed
   *
   * Values for check:
   *
   *   0: disabled
   *   1: simple checks (freeing, held-at-exit-time, etc.)
   *   2: full validation
   */
  extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
  			 int trylock, int read, int check,
  			 struct lockdep_map *nest_lock, unsigned long ip);
  
  extern void lock_release(struct lockdep_map *lock, int nested,
  			 unsigned long ip);
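
/*
 * A locking primitive typically brackets the real acquire/release with
 * these calls (sketch; my_lock()/do_raw_my_lock() etc. are
 * illustrative):
 *
 *	void my_lock(struct my_lock *lock)
 *	{
 *		lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, _RET_IP_);
 *		do_raw_my_lock(lock);
 *	}
 *
 *	void my_unlock(struct my_lock *lock)
 *	{
 *		lock_release(&lock->dep_map, 1, _RET_IP_);
 *		do_raw_my_unlock(lock);
 *	}
 */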
  #define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)
  
  extern int lock_is_held(struct lockdep_map *lock);
  extern void lock_set_class(struct lockdep_map *lock, const char *name,
  			   struct lock_class_key *key, unsigned int subclass,
  			   unsigned long ip);
  
  static inline void lock_set_subclass(struct lockdep_map *lock,
  		unsigned int subclass, unsigned long ip)
  {
  	lock_set_class(lock, lock->name, lock->key, subclass, ip);
  }
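
/*
 * Sketch modeled on the scheduler's double rq-lock code (names are
 * illustrative): after dropping the nested lock, the remaining lock is
 * re-annotated back to subclass 0:
 *
 *	spin_lock(&a->lock);
 *	spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	spin_unlock(&b->lock);
 *	lock_set_subclass(&a->lock.dep_map, 0, _RET_IP_);
 */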

  extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
  extern void lockdep_clear_current_reclaim_state(void);
  extern void lockdep_trace_alloc(gfp_t mask);
  
  # define INIT_LOCKDEP				.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

  #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

  #define lockdep_assert_held(l)	WARN_ON(debug_locks && !lockdep_is_held(l))
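
/*
 * E.g. (illustrative) a helper that requires its caller to hold a lock
 * can document and verify that:
 *
 *	static void my_helper(struct my_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);
 *		...
 *	}
 */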
  #else /* !LOCKDEP */
  
  static inline void lockdep_off(void)
  {
  }
  
  static inline void lockdep_on(void)
  {
  }
  # define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
  # define lock_release(l, n, i)			do { } while (0)
  # define lock_set_class(l, n, k, s, i)		do { } while (0)
  # define lock_set_subclass(l, s, i)		do { } while (0)
  # define lockdep_set_current_reclaim_state(g)	do { } while (0)
  # define lockdep_clear_current_reclaim_state()	do { } while (0)
  # define lockdep_trace_alloc(g)			do { } while (0)
  # define lockdep_init()				do { } while (0)
  # define lockdep_info()				do { } while (0)
  # define lockdep_init_map(lock, name, key, sub) \
  		do { (void)(name); (void)(key); } while (0)
  # define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
  # define lockdep_set_class_and_name(lock, key, name) \
  		do { (void)(key); (void)(name); } while (0)
  #define lockdep_set_class_and_subclass(lock, key, sub) \
  		do { (void)(key); } while (0)
  #define lockdep_set_subclass(lock, sub)		do { } while (0)
  
  #define lockdep_set_novalidate_class(lock) do { } while (0)
  /*
   * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
   * case since the result is not well defined and the caller should rather
   * #ifdef the call himself.
   */

  # define INIT_LOCKDEP
  # define lockdep_reset()		do { debug_locks = 1; } while (0)
  # define lockdep_free_key_range(start, size)	do { } while (0)
  # define lockdep_sys_exit() 			do { } while (0)
  /*
   * The class key takes no space if lockdep is disabled:
   */
  struct lock_class_key { };
  
  #define lockdep_depth(tsk)	(0)
  #define lockdep_assert_held(l)			do { } while (0)
  #endif /* !LOCKDEP */
  #ifdef CONFIG_LOCK_STAT
  
  extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
  extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
  
  #define LOCK_CONTENDED(_lock, try, lock)			\
  do {								\
  	if (!try(_lock)) {					\
  		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
  		lock(_lock);					\
  	}							\
  	lock_acquired(&(_lock)->dep_map, _RET_IP_);			\
  } while (0)
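
/*
 * E.g. down_write() uses it roughly like this (schematic sketch of the
 * rwsem code, not verbatim):
 *
 *	void down_write(struct rw_semaphore *sem)
 *	{
 *		might_sleep();
 *		rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
 *	}
 */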
  
  #else /* CONFIG_LOCK_STAT */
  
  #define lock_contended(lockdep_map, ip) do {} while (0)
  #define lock_acquired(lockdep_map, ip) do {} while (0)
  
  #define LOCK_CONTENDED(_lock, try, lock) \
  	lock(_lock)
  
  #endif /* CONFIG_LOCK_STAT */
  #ifdef CONFIG_LOCKDEP
  
  /*
 * On lockdep we don't want the hand-coded irq-enable of
   * _raw_*_lock_flags() code, because lockdep assumes
   * that interrupts are not re-enabled during lock-acquire:
   */
  #define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
  	LOCK_CONTENDED((_lock), (try), (lock))
  
  #else /* CONFIG_LOCKDEP */
  
  #define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
  	lockfl((_lock), (flags))
  
  #endif /* CONFIG_LOCKDEP */
  #ifdef CONFIG_TRACE_IRQFLAGS
  extern void print_irqtrace_events(struct task_struct *curr);
  #else
  static inline void print_irqtrace_events(struct task_struct *curr)
  {
  }
  #endif
  
  /*
   * For trivial one-depth nesting of a lock-class, the following
   * global define can be used. (Subsystems with multiple levels
   * of nesting should define their own lock-nesting subclasses.)
   */
  #define SINGLE_DEPTH_NESTING			1
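
/*
 * E.g. (illustrative) taking two locks of the same class in a known
 * safe order:
 *
 *	mutex_lock(&parent->mtx);
 *	mutex_lock_nested(&child->mtx, SINGLE_DEPTH_NESTING);
 */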
  
  /*
   * Map the dependency ops to NOP or to real lockdep ops, depending
   * on the per lock-class debug mode:
   */
  
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  # ifdef CONFIG_PROVE_LOCKING
  #  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
  #  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
  # else
  #  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
  #  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
  # endif
  # define spin_release(l, n, i)			lock_release(l, n, i)
  #else
  # define spin_acquire(l, s, t, i)		do { } while (0)
  # define spin_release(l, n, i)			do { } while (0)
  #endif
  
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  # ifdef CONFIG_PROVE_LOCKING
  #  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
  #  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
  # else
  #  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
  #  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
  # endif
  # define rwlock_release(l, n, i)		lock_release(l, n, i)
  #else
  # define rwlock_acquire(l, s, t, i)		do { } while (0)
  # define rwlock_acquire_read(l, s, t, i)	do { } while (0)
  # define rwlock_release(l, n, i)		do { } while (0)
  #endif
  
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  # ifdef CONFIG_PROVE_LOCKING
  #  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
  #  define mutex_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
  # else
  #  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
  #  define mutex_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, n, i)
  # endif
  # define mutex_release(l, n, i)			lock_release(l, n, i)
  #else
  # define mutex_acquire(l, s, t, i)		do { } while (0)
  # define mutex_acquire_nest(l, s, t, n, i)	do { } while (0)
  # define mutex_release(l, n, i)			do { } while (0)
  #endif
  
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  # ifdef CONFIG_PROVE_LOCKING
  #  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
  #  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, NULL, i)
  # else
  #  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
  #  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, NULL, i)
  # endif
  # define rwsem_release(l, n, i)			lock_release(l, n, i)
  #else
  # define rwsem_acquire(l, s, t, i)		do { } while (0)
  # define rwsem_acquire_read(l, s, t, i)		do { } while (0)
  # define rwsem_release(l, n, i)			do { } while (0)
  #endif
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  # ifdef CONFIG_PROVE_LOCKING
  #  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
  #  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
  # else
  #  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
  #  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
  # endif
  # define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
  #else
  # define lock_map_acquire(l)			do { } while (0)
  # define lock_map_acquire_read(l)		do { } while (0)
  # define lock_map_release(l)			do { } while (0)
  #endif
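
/*
 * lock_map_acquire()/lock_map_release() annotate pseudo-lock regions
 * such as work-item execution. Sketch modeled on the workqueue code:
 *
 *	lock_map_acquire(&work->lockdep_map);
 *	f(work);
 *	lock_map_release(&work->lockdep_map);
 */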
  #ifdef CONFIG_PROVE_LOCKING
  # define might_lock(lock) 						\
  do {									\
  	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
  	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);	\
  	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
  } while (0)
  # define might_lock_read(lock) 						\
  do {									\
  	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
  	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
  	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
  } while (0)
  #else
  # define might_lock(lock) do { } while (0)
  # define might_lock_read(lock) do { } while (0)
  #endif
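
/*
 * E.g. (illustrative) a function that takes dev->lock only on some
 * paths can still teach the validator the potential dependency on
 * every call:
 *
 *	might_lock(&dev->lock);
 */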
  #ifdef CONFIG_PROVE_RCU
  extern void lockdep_rcu_dereference(const char *file, const int line);
  #endif
  #endif /* __LINUX_LOCKDEP_H */