Commit de30a2b355ea85350ca2f58f3b9bf4e5bc007986
Committed by
Linus Torvalds
1 parent
5bdc9b447c
Exists in
master
and in
4 other branches
[PATCH] lockdep: irqtrace subsystem, core
Accurate hard-IRQ-flags and softirq-flags state tracing. This allows us to attach extra functionality to IRQ flags on/off events (such as trace-on/off). Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Arjan van de Ven <arjan@linux.intel.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Showing 10 changed files with 313 additions and 30 deletions Side-by-side Diff
arch/powerpc/kernel/irq.c
include/asm-powerpc/irqflags.h
1 | +/* | |
2 | + * include/asm-powerpc/irqflags.h | |
3 | + * | |
4 | + * IRQ flags handling | |
5 | + * | |
6 | + * This file gets included from lowlevel asm headers too, to provide | |
7 | + * wrapped versions of the local_irq_*() APIs, based on the | |
8 | + * raw_local_irq_*() macros from the lowlevel headers. | |
9 | + */ | |
10 | +#ifndef _ASM_IRQFLAGS_H | |
11 | +#define _ASM_IRQFLAGS_H | |
12 | + | |
13 | +/* | |
14 | + * Get definitions for raw_local_save_flags(x), etc. | |
15 | + */ | |
16 | +#include <asm-powerpc/hw_irq.h> | |
17 | + | |
18 | +/* | |
19 | + * Do the CPU's IRQ-state tracing from assembly code. We call a | |
20 | + * C function, so save all the C-clobbered registers: | |
21 | + */ | |
22 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
23 | + | |
24 | +#error No support on PowerPC yet for CONFIG_TRACE_IRQFLAGS | |
25 | + | |
26 | +#else | |
27 | +# define TRACE_IRQS_ON | |
28 | +# define TRACE_IRQS_OFF | |
29 | +#endif | |
30 | + | |
31 | +#endif |
include/linux/hardirq.h
... | ... | @@ -86,9 +86,6 @@ |
86 | 86 | # define synchronize_irq(irq) barrier() |
87 | 87 | #endif |
88 | 88 | |
89 | -#define nmi_enter() irq_enter() | |
90 | -#define nmi_exit() sub_preempt_count(HARDIRQ_OFFSET) | |
91 | - | |
92 | 89 | struct task_struct; |
93 | 90 | |
94 | 91 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING |
95 | 92 | |
96 | 93 | |
97 | 94 | |
... | ... | @@ -97,13 +94,36 @@ |
97 | 94 | } |
98 | 95 | #endif |
99 | 96 | |
97 | +/* | |
98 | + * It is safe to do non-atomic ops on ->hardirq_context, | |
99 | + * because NMI handlers may not preempt and the ops are | |
100 | + * always balanced, so the interrupted value of ->hardirq_context | |
101 | + * will always be restored. | |
102 | + */ | |
100 | 103 | #define irq_enter() \ |
101 | 104 | do { \ |
102 | 105 | account_system_vtime(current); \ |
103 | 106 | add_preempt_count(HARDIRQ_OFFSET); \ |
107 | + trace_hardirq_enter(); \ | |
104 | 108 | } while (0) |
105 | 109 | |
110 | +/* | |
111 | + * Exit irq context without processing softirqs: | |
112 | + */ | |
113 | +#define __irq_exit() \ | |
114 | + do { \ | |
115 | + trace_hardirq_exit(); \ | |
116 | + account_system_vtime(current); \ | |
117 | + sub_preempt_count(HARDIRQ_OFFSET); \ | |
118 | + } while (0) | |
119 | + | |
120 | +/* | |
121 | + * Exit irq context and process softirqs if needed: | |
122 | + */ | |
106 | 123 | extern void irq_exit(void); |
124 | + | |
125 | +#define nmi_enter() irq_enter() | |
126 | +#define nmi_exit() __irq_exit() | |
107 | 127 | |
108 | 128 | #endif /* LINUX_HARDIRQ_H */ |
include/linux/init_task.h
... | ... | @@ -3,6 +3,7 @@ |
3 | 3 | |
4 | 4 | #include <linux/file.h> |
5 | 5 | #include <linux/rcupdate.h> |
6 | +#include <linux/irqflags.h> | |
6 | 7 | |
7 | 8 | #define INIT_FDTABLE \ |
8 | 9 | { \ |
... | ... | @@ -124,6 +125,7 @@ |
124 | 125 | .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ |
125 | 126 | .fs_excl = ATOMIC_INIT(0), \ |
126 | 127 | .pi_lock = SPIN_LOCK_UNLOCKED, \ |
128 | + INIT_TRACE_IRQFLAGS \ | |
127 | 129 | } |
128 | 130 | |
129 | 131 |
include/linux/interrupt.h
... | ... | @@ -10,6 +10,7 @@ |
10 | 10 | #include <linux/irqreturn.h> |
11 | 11 | #include <linux/hardirq.h> |
12 | 12 | #include <linux/sched.h> |
13 | +#include <linux/irqflags.h> | |
13 | 14 | #include <asm/atomic.h> |
14 | 15 | #include <asm/ptrace.h> |
15 | 16 | #include <asm/system.h> |
16 | 17 | |
... | ... | @@ -199,13 +200,11 @@ |
199 | 200 | #define save_and_cli(x) save_and_cli(&x) |
200 | 201 | #endif /* CONFIG_SMP */ |
201 | 202 | |
202 | -/* SoftIRQ primitives. */ | |
203 | -#define local_bh_disable() \ | |
204 | - do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0) | |
205 | -#define __local_bh_enable() \ | |
206 | - do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0) | |
207 | - | |
203 | +extern void local_bh_disable(void); | |
204 | +extern void __local_bh_enable(void); | |
205 | +extern void _local_bh_enable(void); | |
208 | 206 | extern void local_bh_enable(void); |
207 | +extern void local_bh_enable_ip(unsigned long ip); | |
209 | 208 | |
210 | 209 | /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high |
211 | 210 | frequency threaded job scheduling. For almost all the purposes |
include/linux/irqflags.h
1 | +/* | |
2 | + * include/linux/irqflags.h | |
3 | + * | |
4 | + * IRQ flags tracing: follow the state of the hardirq and softirq flags and | |
5 | + * provide callbacks for transitions between ON and OFF states. | |
6 | + * | |
7 | + * This file gets included from lowlevel asm headers too, to provide | |
8 | + * wrapped versions of the local_irq_*() APIs, based on the | |
9 | + * raw_local_irq_*() macros from the lowlevel headers. | |
10 | + */ | |
11 | +#ifndef _LINUX_TRACE_IRQFLAGS_H | |
12 | +#define _LINUX_TRACE_IRQFLAGS_H | |
13 | + | |
14 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
15 | + extern void trace_hardirqs_on(void); | |
16 | + extern void trace_hardirqs_off(void); | |
17 | + extern void trace_softirqs_on(unsigned long ip); | |
18 | + extern void trace_softirqs_off(unsigned long ip); | |
19 | +# define trace_hardirq_context(p) ((p)->hardirq_context) | |
20 | +# define trace_softirq_context(p) ((p)->softirq_context) | |
21 | +# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled) | |
22 | +# define trace_softirqs_enabled(p) ((p)->softirqs_enabled) | |
23 | +# define trace_hardirq_enter() do { current->hardirq_context++; } while (0) | |
24 | +# define trace_hardirq_exit() do { current->hardirq_context--; } while (0) | |
25 | +# define trace_softirq_enter() do { current->softirq_context++; } while (0) | |
26 | +# define trace_softirq_exit() do { current->softirq_context--; } while (0) | |
27 | +# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, | |
28 | +#else | |
29 | +# define trace_hardirqs_on() do { } while (0) | |
30 | +# define trace_hardirqs_off() do { } while (0) | |
31 | +# define trace_softirqs_on(ip) do { } while (0) | |
32 | +# define trace_softirqs_off(ip) do { } while (0) | |
33 | +# define trace_hardirq_context(p) 0 | |
34 | +# define trace_softirq_context(p) 0 | |
35 | +# define trace_hardirqs_enabled(p) 0 | |
36 | +# define trace_softirqs_enabled(p) 0 | |
37 | +# define trace_hardirq_enter() do { } while (0) | |
38 | +# define trace_hardirq_exit() do { } while (0) | |
39 | +# define trace_softirq_enter() do { } while (0) | |
40 | +# define trace_softirq_exit() do { } while (0) | |
41 | +# define INIT_TRACE_IRQFLAGS | |
42 | +#endif | |
43 | + | |
44 | +#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT | |
45 | + | |
46 | +#include <asm/irqflags.h> | |
47 | + | |
48 | +#define local_irq_enable() \ | |
49 | + do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) | |
50 | +#define local_irq_disable() \ | |
51 | + do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) | |
52 | +#define local_irq_save(flags) \ | |
53 | + do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0) | |
54 | + | |
55 | +#define local_irq_restore(flags) \ | |
56 | + do { \ | |
57 | + if (raw_irqs_disabled_flags(flags)) { \ | |
58 | + raw_local_irq_restore(flags); \ | |
59 | + trace_hardirqs_off(); \ | |
60 | + } else { \ | |
61 | + trace_hardirqs_on(); \ | |
62 | + raw_local_irq_restore(flags); \ | |
63 | + } \ | |
64 | + } while (0) | |
65 | +#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ | |
66 | +/* | |
67 | + * The local_irq_*() APIs are equal to the raw_local_irq*() | |
68 | + * if !CONFIG_TRACE_IRQFLAGS_SUPPORT. | |
69 | + */ | |
70 | +# define raw_local_irq_disable() local_irq_disable() | |
71 | +# define raw_local_irq_enable() local_irq_enable() | |
72 | +# define raw_local_irq_save(flags) local_irq_save(flags) | |
73 | +# define raw_local_irq_restore(flags) local_irq_restore(flags) | |
74 | +#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ | |
75 | + | |
76 | +#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT | |
77 | +#define safe_halt() \ | |
78 | + do { \ | |
79 | + trace_hardirqs_on(); \ | |
80 | + raw_safe_halt(); \ | |
81 | + } while (0) | |
82 | + | |
83 | +#define local_save_flags(flags) raw_local_save_flags(flags) | |
84 | + | |
85 | +#define irqs_disabled() \ | |
86 | +({ \ | |
87 | + unsigned long flags; \ | |
88 | + \ | |
89 | + raw_local_save_flags(flags); \ | |
90 | + raw_irqs_disabled_flags(flags); \ | |
91 | +}) | |
92 | + | |
93 | +#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags) | |
94 | +#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ | |
95 | + | |
96 | +#endif |
include/linux/sched.h
... | ... | @@ -871,6 +871,21 @@ |
871 | 871 | /* mutex deadlock detection */ |
872 | 872 | struct mutex_waiter *blocked_on; |
873 | 873 | #endif |
874 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
875 | + unsigned int irq_events; | |
876 | + int hardirqs_enabled; | |
877 | + unsigned long hardirq_enable_ip; | |
878 | + unsigned int hardirq_enable_event; | |
879 | + unsigned long hardirq_disable_ip; | |
880 | + unsigned int hardirq_disable_event; | |
881 | + int softirqs_enabled; | |
882 | + unsigned long softirq_disable_ip; | |
883 | + unsigned int softirq_disable_event; | |
884 | + unsigned long softirq_enable_ip; | |
885 | + unsigned int softirq_enable_event; | |
886 | + int hardirq_context; | |
887 | + int softirq_context; | |
888 | +#endif | |
874 | 889 | |
875 | 890 | /* journalling filesystem info */ |
876 | 891 | void *journal_info; |
kernel/fork.c
... | ... | @@ -968,6 +968,10 @@ |
968 | 968 | if (!p) |
969 | 969 | goto fork_out; |
970 | 970 | |
971 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
972 | + DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); | |
973 | + DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); | |
974 | +#endif | |
971 | 975 | retval = -EAGAIN; |
972 | 976 | if (atomic_read(&p->user->processes) >= |
973 | 977 | p->signal->rlim[RLIMIT_NPROC].rlim_cur) { |
... | ... | @@ -1041,6 +1045,21 @@ |
1041 | 1045 | goto bad_fork_cleanup_cpuset; |
1042 | 1046 | } |
1043 | 1047 | mpol_fix_fork_child_flag(p); |
1048 | +#endif | |
1049 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
1050 | + p->irq_events = 0; | |
1051 | + p->hardirqs_enabled = 0; | |
1052 | + p->hardirq_enable_ip = 0; | |
1053 | + p->hardirq_enable_event = 0; | |
1054 | + p->hardirq_disable_ip = _THIS_IP_; | |
1055 | + p->hardirq_disable_event = 0; | |
1056 | + p->softirqs_enabled = 1; | |
1057 | + p->softirq_enable_ip = _THIS_IP_; | |
1058 | + p->softirq_enable_event = 0; | |
1059 | + p->softirq_disable_ip = 0; | |
1060 | + p->softirq_disable_event = 0; | |
1061 | + p->hardirq_context = 0; | |
1062 | + p->softirq_context = 0; | |
1044 | 1063 | #endif |
1045 | 1064 | |
1046 | 1065 | rt_mutex_init_task(p); |
kernel/sched.c
... | ... | @@ -4462,7 +4462,9 @@ |
4462 | 4462 | BUG_ON(!in_softirq()); |
4463 | 4463 | |
4464 | 4464 | if (need_resched() && __resched_legal()) { |
4465 | - __local_bh_enable(); | |
4465 | + raw_local_irq_disable(); | |
4466 | + _local_bh_enable(); | |
4467 | + raw_local_irq_enable(); | |
4466 | 4468 | __cond_resched(); |
4467 | 4469 | local_bh_disable(); |
4468 | 4470 | return 1; |
kernel/softirq.c
... | ... | @@ -62,6 +62,119 @@ |
62 | 62 | } |
63 | 63 | |
64 | 64 | /* |
65 | + * This one is for softirq.c-internal use, | |
66 | + * where hardirqs are disabled legitimately: | |
67 | + */ | |
68 | +static void __local_bh_disable(unsigned long ip) | |
69 | +{ | |
70 | + unsigned long flags; | |
71 | + | |
72 | + WARN_ON_ONCE(in_irq()); | |
73 | + | |
74 | + raw_local_irq_save(flags); | |
75 | + add_preempt_count(SOFTIRQ_OFFSET); | |
76 | + /* | |
77 | + * Were softirqs turned off above: | |
78 | + */ | |
79 | + if (softirq_count() == SOFTIRQ_OFFSET) | |
80 | + trace_softirqs_off(ip); | |
81 | + raw_local_irq_restore(flags); | |
82 | +} | |
83 | + | |
84 | +void local_bh_disable(void) | |
85 | +{ | |
86 | + __local_bh_disable((unsigned long)__builtin_return_address(0)); | |
87 | +} | |
88 | + | |
89 | +EXPORT_SYMBOL(local_bh_disable); | |
90 | + | |
91 | +void __local_bh_enable(void) | |
92 | +{ | |
93 | + WARN_ON_ONCE(in_irq()); | |
94 | + | |
95 | + /* | |
96 | + * softirqs should never be enabled by __local_bh_enable(), | |
97 | + * it always nests inside local_bh_enable() sections: | |
98 | + */ | |
99 | + WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET); | |
100 | + | |
101 | + sub_preempt_count(SOFTIRQ_OFFSET); | |
102 | +} | |
103 | +EXPORT_SYMBOL_GPL(__local_bh_enable); | |
104 | + | |
105 | +/* | |
106 | + * Special-case - softirqs can safely be enabled in | |
107 | + * cond_resched_softirq(), or by __do_softirq(), | |
108 | + * without processing still-pending softirqs: | |
109 | + */ | |
110 | +void _local_bh_enable(void) | |
111 | +{ | |
112 | + WARN_ON_ONCE(in_irq()); | |
113 | + WARN_ON_ONCE(!irqs_disabled()); | |
114 | + | |
115 | + if (softirq_count() == SOFTIRQ_OFFSET) | |
116 | + trace_softirqs_on((unsigned long)__builtin_return_address(0)); | |
117 | + sub_preempt_count(SOFTIRQ_OFFSET); | |
118 | +} | |
119 | + | |
120 | +EXPORT_SYMBOL(_local_bh_enable); | |
121 | + | |
122 | +void local_bh_enable(void) | |
123 | +{ | |
124 | + unsigned long flags; | |
125 | + | |
126 | + WARN_ON_ONCE(in_irq()); | |
127 | + WARN_ON_ONCE(irqs_disabled()); | |
128 | + | |
129 | + local_irq_save(flags); | |
130 | + /* | |
131 | + * Are softirqs going to be turned on now: | |
132 | + */ | |
133 | + if (softirq_count() == SOFTIRQ_OFFSET) | |
134 | + trace_softirqs_on((unsigned long)__builtin_return_address(0)); | |
135 | + /* | |
136 | + * Keep preemption disabled until we are done with | |
137 | + * softirq processing: | |
138 | + */ | |
139 | + sub_preempt_count(SOFTIRQ_OFFSET - 1); | |
140 | + | |
141 | + if (unlikely(!in_interrupt() && local_softirq_pending())) | |
142 | + do_softirq(); | |
143 | + | |
144 | + dec_preempt_count(); | |
145 | + local_irq_restore(flags); | |
146 | + preempt_check_resched(); | |
147 | +} | |
148 | +EXPORT_SYMBOL(local_bh_enable); | |
149 | + | |
150 | +void local_bh_enable_ip(unsigned long ip) | |
151 | +{ | |
152 | + unsigned long flags; | |
153 | + | |
154 | + WARN_ON_ONCE(in_irq()); | |
155 | + | |
156 | + local_irq_save(flags); | |
157 | + /* | |
158 | + * Are softirqs going to be turned on now: | |
159 | + */ | |
160 | + if (softirq_count() == SOFTIRQ_OFFSET) | |
161 | + trace_softirqs_on(ip); | |
162 | + /* | |
163 | + * Keep preemption disabled until we are done with | |
164 | + * softirq processing: | |
165 | + */ | |
166 | + sub_preempt_count(SOFTIRQ_OFFSET - 1); | |
167 | + | |
168 | + if (unlikely(!in_interrupt() && local_softirq_pending())) | |
169 | + do_softirq(); | |
170 | + | |
171 | + dec_preempt_count(); | |
172 | + local_irq_restore(flags); | |
173 | + preempt_check_resched(); | |
174 | +} | |
175 | +EXPORT_SYMBOL(local_bh_enable_ip); | |
176 | + | |
177 | +/* | |
65 | 178 | * We restart softirq processing MAX_SOFTIRQ_RESTART times, |
66 | 179 | * and we fall back to softirqd after that. |
67 | 180 | * |
68 | 181 | |
... | ... | @@ -80,8 +193,9 @@ |
80 | 193 | int cpu; |
81 | 194 | |
82 | 195 | pending = local_softirq_pending(); |
196 | + __local_bh_disable((unsigned long)__builtin_return_address(0)); | |
197 | + trace_softirq_enter(); | |
83 | 198 | |
84 | - local_bh_disable(); | |
85 | 199 | cpu = smp_processor_id(); |
86 | 200 | restart: |
87 | 201 | /* Reset the pending bitmask before enabling irqs */ |
... | ... | @@ -109,7 +223,8 @@ |
109 | 223 | if (pending) |
110 | 224 | wakeup_softirqd(); |
111 | 225 | |
112 | - __local_bh_enable(); | |
226 | + trace_softirq_exit(); | |
227 | + _local_bh_enable(); | |
113 | 228 | } |
114 | 229 | |
115 | 230 | #ifndef __ARCH_HAS_DO_SOFTIRQ |
... | ... | @@ -136,23 +251,6 @@ |
136 | 251 | |
137 | 252 | #endif |
138 | 253 | |
139 | -void local_bh_enable(void) | |
140 | -{ | |
141 | - WARN_ON(irqs_disabled()); | |
142 | - /* | |
143 | - * Keep preemption disabled until we are done with | |
144 | - * softirq processing: | |
145 | - */ | |
146 | - sub_preempt_count(SOFTIRQ_OFFSET - 1); | |
147 | - | |
148 | - if (unlikely(!in_interrupt() && local_softirq_pending())) | |
149 | - do_softirq(); | |
150 | - | |
151 | - dec_preempt_count(); | |
152 | - preempt_check_resched(); | |
153 | -} | |
154 | -EXPORT_SYMBOL(local_bh_enable); | |
155 | - | |
156 | 254 | #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED |
157 | 255 | # define invoke_softirq() __do_softirq() |
158 | 256 | #else |
... | ... | @@ -165,6 +263,7 @@ |
165 | 263 | void irq_exit(void) |
166 | 264 | { |
167 | 265 | account_system_vtime(current); |
266 | + trace_hardirq_exit(); | |
168 | 267 | sub_preempt_count(IRQ_EXIT_OFFSET); |
169 | 268 | if (!in_interrupt() && local_softirq_pending()) |
170 | 269 | invoke_softirq(); |