Blame view
kernel/locking/lockdep_internals.h
5.06 KB
b24413180 License cleanup: ... |
1 |
/* SPDX-License-Identifier: GPL-2.0 */ |
fbb9ce953 [PATCH] lockdep: ... |
2 3 4 5 6 7 8 9 10 |
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 *
 * Each state listed in lockdep_states.h expands to four consecutive
 * enum values via the LOCKDEP_STATE() x-macro below:
 * used-in-state, used-in-state-read, enabled-in-state and
 * enabled-in-state-read.
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES	/* number of usage bits, not itself a bit */
};

/*
 * Usage-state bitmasks:
 */
/*
 * For every usage bit LOCK_FOO above, define the matching single-bit
 * mask LOCKF_FOO = (1 << LOCK_FOO), again driven by lockdep_states.h.
 */
#define __LOCKF(__STATE) LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};

/* Convenience masks covering both hard and soft IRQ contexts: */
#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_ENABLED_IRQ_READ \
		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit in required 32MB limit for the kernel. With
 * CONFIG_LOCKDEP we could go over this limit and cause system boot-up problems.
 * So, reduce the static allocations for lockdeps related structures so that
 * everything fits in current required size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow it by adding
 * every to-be-taken lock to all currently held lock's own dependency
 * table (if it's not there yet), and we check it for lock order
 * conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#else
/* Full-size tables: double each of the CONFIG_LOCKDEP_SMALL limits above. */
#define MAX_LOCKDEP_ENTRIES	32768UL

#define MAX_LOCKDEP_CHAINS_BITS	16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#endif

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

/* Room for 5 held-lock entries per chain on average. */
#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
/* All lock classes seen so far; defined in lockdep.c. */
extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

/*
 * One usage character per pair of state bits (state + its _READ
 * variant), plus one for the terminating NUL.
 */
#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

/* Fill @usage with the +/-/? usage summary characters for @class. */
extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);

/* Format @key's symbol name into @str and return it. */
extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

/* Return the @i-th lock class recorded in @chain. */
struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

/* Global usage counters of the statically sized tables above: */
extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
extern unsigned long nr_lock_chains;
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;
extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;

extern unsigned int max_bfs_queue_depth;
#ifdef CONFIG_PROVE_LOCKING
/* Walk the dependency graph and count reachable classes; in lockdep.c. */
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
/*
 * Without the prover there is no dependency graph to walk, so both
 * counts are trivially zero.
 */
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}

static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif
121 |
|
fbb9ce953 [PATCH] lockdep: ... |
122 |
#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per cpu as they are often accessed in fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	int	chain_lookup_hits;
	int	chain_lookup_misses;
	int	hardirqs_on_events;
	int	hardirqs_off_events;
	int	redundant_hardirqs_on;
	int	redundant_hardirqs_off;
	int	softirqs_on_events;
	int	softirqs_off_events;
	int	redundant_softirqs_on;
	int	redundant_softirqs_off;
	int	nr_unused_locks;
	int	nr_redundant_checks;
	int	nr_redundant;
	int	nr_cyclic_checks;
	int	nr_cyclic_check_recursions;
	int	nr_find_usage_forwards_checks;
	int	nr_find_usage_forwards_recursions;
	int	nr_find_usage_backwards_checks;
	int	nr_find_usage_backwards_recursions;
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

/*
 * No trailing ';' and do-while(0) wrapping below: every call site must
 * already supply its own ';' (the !CONFIG_DEBUG_LOCKDEP stubs demand it),
 * and the do-while form keeps the multi-statement macros safe inside
 * unbraced if/else bodies.
 */
#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr)

/* Callers must run with IRQs disabled; __this_cpu_*() relies on it. */
#define debug_atomic_inc(ptr)			do {		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
} while (0)

#define debug_atomic_dec(ptr)			do {		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
} while (0)

/* Sum a counter over all possible CPUs (slow path, /proc reporting). */
#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
#endif