include/linux/lockdep.h
  /*
   * Runtime locking correctness validator
   *
   *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
   *
   * see Documentation/locking/lockdep-design.txt for more details.
   */
  #ifndef __LINUX_LOCKDEP_H
  #define __LINUX_LOCKDEP_H
  struct task_struct;
  struct lockdep_map;

  /* for sysctl */
  extern int prove_locking;
  extern int lock_stat;
  #define MAX_LOCKDEP_SUBCLASSES		8UL
  #ifdef CONFIG_LOCKDEP
  #include <linux/linkage.h>
  #include <linux/list.h>
  #include <linux/debug_locks.h>
  #include <linux/stacktrace.h>
  /*
   * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
   * the total number of states... :-(
   */
  #define XXX_LOCK_USAGE_STATES		(1+3*4)

  /*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires this highly contended lock with single
 * depth.
   */
  #define NR_LOCKDEP_CACHING_CLASSES	2
  
  /*
   * Lock-classes are keyed via unique addresses, by embedding the
   * lockclass-key into the kernel (or module) .data section. (For
   * static locks we use the lock address itself as the key.)
   */
  struct lockdep_subclass_key {
  	char __one_byte;
  } __attribute__ ((__packed__));
  
  struct lock_class_key {
  	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
  };
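
/*
 * Illustrative sketch (not part of the original header): dynamically
 * allocated locks of one kind can share a single class by registering
 * a static key for them; "struct my_table" and my_table_init() here
 * are hypothetical names:
 *
 *	static struct lock_class_key my_table_lock_key;
 *
 *	static void my_table_init(struct my_table *t)
 *	{
 *		spin_lock_init(&t->lock);
 *		lockdep_set_class(&t->lock, &my_table_lock_key);
 *	}
 */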
  extern struct lock_class_key __lockdep_no_validate__;
  #define LOCKSTAT_POINTS		4
  /*
   * The lock-class itself:
   */
  struct lock_class {
  	/*
  	 * class-hash:
  	 */
  	struct hlist_node		hash_entry;
  
  	/*
  	 * global list of all lock-classes:
  	 */
  	struct list_head		lock_entry;
  
  	struct lockdep_subclass_key	*key;
  	unsigned int			subclass;
  	unsigned int			dep_gen_id;
  
  	/*
  	 * IRQ/softirq usage tracking bits:
  	 */
  	unsigned long			usage_mask;
  	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];
  
  	/*
  	 * These fields represent a directed graph of lock dependencies,
  	 * to every node we attach a list of "forward" and a list of
  	 * "backward" graph nodes.
  	 */
  	struct list_head		locks_after, locks_before;
  
  	/*
  	 * Generation counter, when doing certain classes of graph walking,
  	 * to ensure that we check one node only once:
  	 */
  	unsigned int			version;
  
  	/*
  	 * Statistics counter:
  	 */
  	unsigned long			ops;
  
  	const char			*name;
  	int				name_version;
  
  #ifdef CONFIG_LOCK_STAT
  	unsigned long			contention_point[LOCKSTAT_POINTS];
  	unsigned long			contending_point[LOCKSTAT_POINTS];
  #endif
  };
  
  #ifdef CONFIG_LOCK_STAT
  struct lock_time {
  	s64				min;
  	s64				max;
  	s64				total;
  	unsigned long			nr;
  };
  enum bounce_type {
  	bounce_acquired_write,
  	bounce_acquired_read,
  	bounce_contended_write,
  	bounce_contended_read,
  	nr_bounce_types,
  
  	bounce_acquired = bounce_acquired_write,
  	bounce_contended = bounce_contended_write,
  };
  struct lock_class_stats {
  	unsigned long			contention_point[LOCKSTAT_POINTS];
  	unsigned long			contending_point[LOCKSTAT_POINTS];
  	struct lock_time		read_waittime;
  	struct lock_time		write_waittime;
  	struct lock_time		read_holdtime;
  	struct lock_time		write_holdtime;
  	unsigned long			bounces[nr_bounce_types];
  };
  
  struct lock_class_stats lock_stats(struct lock_class *class);
  void clear_lock_stats(struct lock_class *class);
  #endif
  /*
   * Map the lock object (the lock instance) to the lock-class object.
   * This is embedded into specific lock instances:
   */
  struct lockdep_map {
  	struct lock_class_key		*key;
  	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
  	const char			*name;
  #ifdef CONFIG_LOCK_STAT
  	int				cpu;
  	unsigned long			ip;
  #endif
  };
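
/*
 * Simplified sketch of how a lock type embeds this map (the real
 * definition lives in spinlock_types.h and carries additional debug
 * fields):
 *
 *	typedef struct raw_spinlock {
 *		arch_spinlock_t raw_lock;
 *	#ifdef CONFIG_DEBUG_LOCK_ALLOC
 *		struct lockdep_map dep_map;
 *	#endif
 *	} raw_spinlock_t;
 */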
  static inline void lockdep_copy_map(struct lockdep_map *to,
  				    struct lockdep_map *from)
  {
  	int i;
  
  	*to = *from;
  	/*
  	 * Since the class cache can be modified concurrently we could observe
  	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
  	 * the caches and take the performance hit.
  	 *
  	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
  	 *     that relies on cache abuse.
  	 */
  	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
  		to->class_cache[i] = NULL;
  }
  /*
   * Every lock has a list of other locks that were taken after it.
   * We only grow the list, never remove from it:
   */
  struct lock_list {
  	struct list_head		entry;
  	struct lock_class		*class;
  	struct stack_trace		trace;
  	int				distance;

  	/*
  	 * The parent field is used to implement breadth-first search, and the
  	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
  	 */
  	struct lock_list		*parent;
  };
  
  /*
   * We record lock dependency chains, so that we can cache them:
   */
  struct lock_chain {
  	/* see BUILD_BUG_ON()s in lookup_chain_cache() */
  	unsigned int			irq_context :  2,
  					depth       :  6,
  					base	    : 24;
  	/* 4 byte hole */
  	struct hlist_node		entry;
  	u64				chain_key;
  };
  #define MAX_LOCKDEP_KEYS_BITS		13
  /*
   * Subtract one because we offset hlock->class_idx by 1 in order
   * to make 0 mean no class. This avoids overflowing the class_idx
   * bitfield and hitting the BUG in hlock_class().
   */
  #define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

  struct held_lock {
  	/*
  	 * One-way hash of the dependency chain up to this point. We
  	 * hash the hashes step by step as the dependency chain grows.
  	 *
  	 * We use it for dependency-caching and we skip detection
  	 * passes and dependency-updates if there is a cache-hit, so
  	 * it is absolutely critical for 100% coverage of the validator
  	 * to have a unique key value for every unique dependency path
  	 * that can occur in the system, to make a unique hash value
  	 * as likely as possible - hence the 64-bit width.
  	 *
  	 * The task struct holds the current hash value (initialized
  	 * with zero), here we store the previous hash value:
  	 */
  	u64				prev_chain_key;
  	unsigned long			acquire_ip;
  	struct lockdep_map		*instance;
  	struct lockdep_map		*nest_lock;
  #ifdef CONFIG_LOCK_STAT
  	u64 				waittime_stamp;
  	u64				holdtime_stamp;
  #endif
  	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
  	/*
  	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
  	 * dependencies - the lock usage graph walking covers that area
  	 * anyway, and we'd just unnecessarily increase the number of
  	 * dependencies otherwise. [Note: hardirq and softirq contexts
  	 * are separated from each other too.]
  	 *
  	 * The following field is used to detect when we cross into an
  	 * interrupt context:
  	 */
  	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
  	unsigned int trylock:1;						/* 16 bits */
  	unsigned int read:2;        /* see lock_acquire() comment */
  	unsigned int check:1;       /* see lock_acquire() comment */
  	unsigned int hardirqs_off:1;
  	unsigned int references:12;					/* 32 bits */
  	unsigned int pin_count;
  };
  
  /*
   * Initialization, self-test and debugging-output methods:
   */
  extern void lockdep_info(void);
  extern void lockdep_reset(void);
  extern void lockdep_reset_lock(struct lockdep_map *lock);
  extern void lockdep_free_key_range(void *start, unsigned long size);
  extern asmlinkage void lockdep_sys_exit(void);
  
  extern void lockdep_off(void);
  extern void lockdep_on(void);
  
  /*
   * These methods are used by specific locking variants (spinlocks,
   * rwlocks, mutexes and rwsems) to pass init/acquire/release events
   * to lockdep:
   */
  
  extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
  			     struct lock_class_key *key, int subclass);
  
  /*
 * To initialize a lockdep_map statically, use this macro.
   * Note that _name must not be NULL.
   */
  #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
  	{ .name = (_name), .key = (void *)(_key), }
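
/*
 * For example, a subsystem-private dependency map could be declared as
 * follows (sketch; the "my_subsys" names are hypothetical):
 *
 *	static struct lock_class_key my_subsys_key;
 *	struct lockdep_map my_subsys_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_subsys", &my_subsys_key);
 */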
  
  /*
   * Reinitialize a lock key - for cases where there is special locking or
   * special initialization of locks so that the validator gets the scope
   * of dependencies wrong: they are either too broad (they need a class-split)
   * or they are too narrow (they suffer from a false class-split):
   */
  #define lockdep_set_class(lock, key) \
  		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
  #define lockdep_set_class_and_name(lock, key, name) \
  		lockdep_init_map(&(lock)->dep_map, name, key, 0)
  #define lockdep_set_class_and_subclass(lock, key, sub) \
  		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
  #define lockdep_set_subclass(lock, sub)	\
  		lockdep_init_map(&(lock)->dep_map, #lock, \
  				 (lock)->dep_map.key, sub)
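
/*
 * Sketch of a class split (hypothetical names): when two instances of
 * the same lock type are always nested in one order, give them distinct
 * classes so the validator can tell them apart:
 *
 *	static struct lock_class_key inner_key, outer_key;
 *
 *	lockdep_set_class_and_name(&inner->lock, &inner_key, "inner->lock");
 *	lockdep_set_class_and_name(&outer->lock, &outer_key, "outer->lock");
 */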
  
  #define lockdep_set_novalidate_class(lock) \
  	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
  /*
   * Compare locking classes
   */
  #define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)
  
  static inline int lockdep_match_key(struct lockdep_map *lock,
  				    struct lock_class_key *key)
  {
  	return lock->key == key;
  }
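
/*
 * E.g. a debug check that a lock still carries the class it was
 * annotated with (sketch, reusing the hypothetical key from above):
 *
 *	WARN_ON_ONCE(!lockdep_match_class(&t->lock, &my_table_lock_key));
 */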
  
  /*
   * Acquire a lock.
   *
   * Values for "read":
   *
   *   0: exclusive (write) acquire
   *   1: read-acquire (no recursion allowed)
   *   2: read-acquire with same-instance recursion allowed
   *
   * Values for check:
   *
   *   0: simple checks (freeing, held-at-exit-time, etc.)
   *   1: full validation
   */
  extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
  			 int trylock, int read, int check,
  			 struct lockdep_map *nest_lock, unsigned long ip);
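
/*
 * For instance, a recursive read acquire with full validation would be
 * annotated as (sketch):
 *
 *	lock_acquire(&lock->dep_map, 0, 0, 2, 1, NULL, _RET_IP_);
 *
 * which is what the rwlock_acquire_read() wrapper below expands to for
 * subclass 0, non-trylock.
 */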
  
  extern void lock_release(struct lockdep_map *lock, int nested,
  			 unsigned long ip);
  /*
   * Same "read" as for lock_acquire(), except -1 means any.
   */
  extern int lock_is_held_type(struct lockdep_map *lock, int read);
  
  static inline int lock_is_held(struct lockdep_map *lock)
  {
  	return lock_is_held_type(lock, -1);
  }

  #define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
  #define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))
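
/*
 * lockdep_is_held() is typically used in proving conditions, e.g.
 * (sketch; "t" is a hypothetical lock-holding object):
 *
 *	p = rcu_dereference_protected(t->item, lockdep_is_held(&t->lock));
 */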

  extern void lock_set_class(struct lockdep_map *lock, const char *name,
  			   struct lock_class_key *key, unsigned int subclass,
  			   unsigned long ip);
  
  static inline void lock_set_subclass(struct lockdep_map *lock,
  		unsigned int subclass, unsigned long ip)
  {
  	lock_set_class(lock, lock->name, lock->key, subclass, ip);
  }

  extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
  extern void lockdep_clear_current_reclaim_state(void);
  extern void lockdep_trace_alloc(gfp_t mask);
  struct pin_cookie { unsigned int val; };
  
  #define NIL_COOKIE (struct pin_cookie){ .val = 0U, }
  
  extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
  extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
  extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

  # define INIT_LOCKDEP				.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

  #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

  #define lockdep_assert_held(l)	do {				\
  		WARN_ON(debug_locks && !lockdep_is_held(l));	\
  	} while (0)

  #define lockdep_assert_held_exclusive(l)	do {			\
  		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
  	} while (0)
  
  #define lockdep_assert_held_read(l)	do {				\
  		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
  	} while (0)
  #define lockdep_assert_held_once(l)	do {				\
  		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
  	} while (0)
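
/*
 * Sketch (hypothetical helper): a function that requires its caller to
 * hold t->lock for writing can document and verify that with:
 *
 *	static void my_table_evict(struct my_table *t)
 *	{
 *		lockdep_assert_held_exclusive(&t->lock);
 *		...
 *	}
 */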
  #define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)
  #define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
  #define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
  #define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
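
/*
 * Pinning sketch: guard a region against the lock being dropped behind
 * our back, e.g. around a callback that must not release rq->lock:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&rq->lock);
 *	...
 *	lockdep_unpin_lock(&rq->lock, cookie);
 */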

  #else /* !CONFIG_LOCKDEP */
  
  static inline void lockdep_off(void)
  {
  }
  
  static inline void lockdep_on(void)
  {
  }
  # define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
  # define lock_release(l, n, i)			do { } while (0)
  # define lock_set_class(l, n, k, s, i)		do { } while (0)
  # define lock_set_subclass(l, s, i)		do { } while (0)
  # define lockdep_set_current_reclaim_state(g)	do { } while (0)
  # define lockdep_clear_current_reclaim_state()	do { } while (0)
  # define lockdep_trace_alloc(g)			do { } while (0)
  # define lockdep_info()				do { } while (0)
  # define lockdep_init_map(lock, name, key, sub) \
  		do { (void)(name); (void)(key); } while (0)
  # define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
  # define lockdep_set_class_and_name(lock, key, name) \
  		do { (void)(key); (void)(name); } while (0)
  #define lockdep_set_class_and_subclass(lock, key, sub) \
  		do { (void)(key); } while (0)
  #define lockdep_set_subclass(lock, sub)		do { } while (0)
  
  #define lockdep_set_novalidate_class(lock) do { } while (0)
  /*
   * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
 * case since the result is not well defined and the caller should
 * instead #ifdef the call site.
   */

  # define INIT_LOCKDEP
  # define lockdep_reset()		do { debug_locks = 1; } while (0)
  # define lockdep_free_key_range(start, size)	do { } while (0)
  # define lockdep_sys_exit() 			do { } while (0)
  /*
   * The class key takes no space if lockdep is disabled:
   */
  struct lock_class_key { };
  
  #define lockdep_depth(tsk)	(0)
  #define lockdep_is_held_type(l, r)		(1)
  #define lockdep_assert_held(l)			do { (void)(l); } while (0)
  #define lockdep_assert_held_exclusive(l)	do { (void)(l); } while (0)
  #define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
  #define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

  #define lockdep_recursing(tsk)			(0)
  struct pin_cookie { };
  
  #define NIL_COOKIE (struct pin_cookie){ }
  
  #define lockdep_pin_lock(l)			({ struct pin_cookie cookie; cookie; })
  #define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
  #define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

  #endif /* !LOCKDEP */
  #ifdef CONFIG_LOCK_STAT
  
  extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
  extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
  
  #define LOCK_CONTENDED(_lock, try, lock)			\
  do {								\
  	if (!try(_lock)) {					\
  		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
  		lock(_lock);					\
  	}							\
  	lock_acquired(&(_lock)->dep_map, _RET_IP_);			\
  } while (0)
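
/*
 * A locking primitive wires this up roughly as follows (sketch modelled
 * on the spinlock code in spinlock_api_smp.h):
 *
 *	static inline void __raw_spin_lock(raw_spinlock_t *lock)
 *	{
 *		preempt_disable();
 *		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 *	}
 */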
  #define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
  ({								\
  	int ____err = 0;					\
  	if (!try(_lock)) {					\
  		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
  		____err = lock(_lock);				\
  	}							\
  	if (!____err)						\
  		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
  	____err;						\
  })
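
/*
 * Used for acquire paths that can fail, e.g. a killable rwsem write
 * lock whose slowpath may return -EINTR (sketch modelled on
 * down_write_killable()):
 *
 *	ret = LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
 *				    __down_write_killable);
 */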
  #else /* CONFIG_LOCK_STAT */
  
  #define lock_contended(lockdep_map, ip) do {} while (0)
  #define lock_acquired(lockdep_map, ip) do {} while (0)
  
  #define LOCK_CONTENDED(_lock, try, lock) \
  	lock(_lock)
  #define LOCK_CONTENDED_RETURN(_lock, try, lock) \
  	lock(_lock)
  #endif /* CONFIG_LOCK_STAT */
  #ifdef CONFIG_LOCKDEP
  
  /*
 * Under lockdep we don't want the hand-coded irq-enable of
   * _raw_*_lock_flags() code, because lockdep assumes
   * that interrupts are not re-enabled during lock-acquire:
   */
  #define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
  	LOCK_CONTENDED((_lock), (try), (lock))
  
  #else /* CONFIG_LOCKDEP */
  
  #define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
  	lockfl((_lock), (flags))
  
  #endif /* CONFIG_LOCKDEP */
  #ifdef CONFIG_TRACE_IRQFLAGS
  extern void print_irqtrace_events(struct task_struct *curr);
  #else
  static inline void print_irqtrace_events(struct task_struct *curr)
  {
  }
  #endif
  
  /*
   * For trivial one-depth nesting of a lock-class, the following
   * global define can be used. (Subsystems with multiple levels
   * of nesting should define their own lock-nesting subclasses.)
   */
  #define SINGLE_DEPTH_NESTING			1
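
/*
 * E.g. when taking two locks of the same class in a known-safe order
 * (sketch):
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */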
  
  /*
   * Map the dependency ops to NOP or to real lockdep ops, depending
   * on the per lock-class debug mode:
   */
  #define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
  #define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
  #define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

  #define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
  #define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
  #define spin_release(l, n, i)			lock_release(l, n, i)

  #define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
  #define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
  #define rwlock_release(l, n, i)			lock_release(l, n, i)

  #define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
  #define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
  #define seqcount_release(l, n, i)		lock_release(l, n, i)
  #define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
  #define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
  #define mutex_release(l, n, i)			lock_release(l, n, i)
  
  #define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
  #define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
  #define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
  #define rwsem_release(l, n, i)			lock_release(l, n, i)

  #define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
  #define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
  #define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
  #define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)

  #ifdef CONFIG_PROVE_LOCKING
  # define might_lock(lock) 						\
  do {									\
  	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
  	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
  	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
  } while (0)
  # define might_lock_read(lock) 						\
  do {									\
  	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
  	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
  	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
  } while (0)
  #else
  # define might_lock(lock) do { } while (0)
  # define might_lock_read(lock) do { } while (0)
  #endif
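
/*
 * Sketch (hypothetical helper): a lookup that only takes the lock on
 * its slow path can still expose the dependency to lockdep on every
 * call:
 *
 *	void *my_cache_lookup(struct my_cache *c, u32 id)
 *	{
 *		might_lock(&c->lock);
 *		...
 *	}
 */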
  #ifdef CONFIG_LOCKDEP
  void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
  #else
  static inline void
  lockdep_rcu_suspicious(const char *file, const int line, const char *s)
  {
  }
  #endif
  #endif /* __LINUX_LOCKDEP_H */