Blame view
kernel/lockdep_internals.h
4.48 KB
fbb9ce953 [PATCH] lockdep: ... |
1 2 3 4 5 6 7 8 9 |
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 *
 * For every state listed in lockdep_states.h, the LOCKDEP_STATE()
 * x-macro below generates four enumerators per lock class:
 * used-in-<STATE>, used-in-<STATE>-read, enabled-<STATE> and
 * enabled-<STATE>-read.  LOCK_USED marks a class that has been
 * used at all.
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES	/* total number of usage bits */
};

/*
 * Usage-state bitmasks:
 */
/*
 * One LOCKF_foo bitmask constant per LOCK_foo usage bit above:
 * LOCKF_foo == (1 << LOCK_foo).  Generated with the same
 * lockdep_states.h x-macro so the two enums stay in sync.
 */
#define __LOCKF(__STATE) LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)			\
	__LOCKF(USED_IN_##__STATE)		\
	__LOCKF(USED_IN_##__STATE##_READ)	\
	__LOCKF(ENABLED_##__STATE)		\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};
/* Combined masks covering both hard- and soft-IRQ states: */
#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_ENABLED_IRQ_READ \
		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow it by adding
 * every to-be-taken lock to all currently held lock's own dependency
 * table (if it's not there yet), and we check it for lock order
 * conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
/*
 * Maximum number of distinct lock chains tracked: 2^15.
 */
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

/* Held-lock slots shared by all chains: room for 5 per chain on average. */
#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	262144UL
extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

/*
 * Buffer size for get_usage_chars(): one char per pair of usage bits
 * plus a trailing terminator (LOCK_USAGE_STATES is the bit count).
 */
#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);

extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

/*
 * Resource-usage and statistics counters maintained by lockdep
 * (defined in lockdep.c; consumers not visible in this header):
 */
extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
extern unsigned long nr_lock_chains;
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;
extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;

/* Deepest BFS queue occupancy observed during dependency searches. */
extern unsigned int max_bfs_queue_depth;
#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
/*
 * Without CONFIG_PROVE_LOCKING no dependency graph is built,
 * so the dependency counts are trivially zero.
 */
static inline unsigned long lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif
419ca3f13 lockdep: fix comb... |
104 |
|
fbb9ce953 [PATCH] lockdep: ... |
105 |
#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per cpu as they are often accessed in fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	int	chain_lookup_hits;
	int	chain_lookup_misses;
	int	hardirqs_on_events;
	int	hardirqs_off_events;
	int	redundant_hardirqs_on;
	int	redundant_hardirqs_off;
	int	softirqs_on_events;
	int	softirqs_off_events;
	int	redundant_softirqs_on;
	int	redundant_softirqs_off;
	int	nr_unused_locks;
	int	nr_cyclic_checks;
	int	nr_cyclic_check_recursions;
	int	nr_find_usage_forwards_checks;
	int	nr_find_usage_forwards_recursions;
	int	nr_find_usage_backwards_checks;
	int	nr_find_usage_backwards_recursions;
};

/* One statistics block per possible CPU; see accessor macros below. */
DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
/*
 * Statistics accessors.
 *
 * __debug_atomic_inc() uses this_cpu_inc(), which is safe in any
 * context; debug_atomic_inc()/debug_atomic_dec() use the cheaper
 * non-atomic __this_cpu_*() ops and therefore assert that the caller
 * has interrupts disabled.
 *
 * The multi-statement macros are wrapped in do { } while (0) so they
 * expand to a single statement and remain safe in unbraced if/else
 * bodies; the bare { } form would break "if (c) debug_atomic_inc(x);
 * else ...".  This also makes them consistent with the
 * !CONFIG_DEBUG_LOCKDEP stubs below, which already use that form
 * (and, like them, __debug_atomic_inc() no longer supplies its own
 * trailing semicolon).
 */
#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr)

#define debug_atomic_inc(ptr)			do {		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
} while (0)

#define debug_atomic_dec(ptr)			do {		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
} while (0)

/*
 * Sum a statistics field over all possible CPUs.  Implemented as a
 * GNU statement expression so it can be used as an rvalue.
 */
#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
#else /* !CONFIG_DEBUG_LOCKDEP */
/* Statistics are compiled out: all accessors become no-ops. */
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
#endif