Commit afbfb52e47273a440df33274452c603e8c332de2
Parent: c03c69610b (1 parent)
Exists in: master and 39 other branches
sh: stacktrace/lockdep/irqflags tracing support.
Wire up all of the essentials for lockdep.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Showing 11 changed files with 289 additions and 103 deletions (side-by-side diff)
arch/sh/Kconfig
arch/sh/Kconfig.debug
arch/sh/kernel/Makefile
arch/sh/kernel/cpu/sh2/entry.S
... | ... | @@ -184,6 +184,11 @@ |
184 | 184 | add r15,r8 |
185 | 185 | mov.l r9,@r8 |
186 | 186 | mov r9,r8 |
187 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
188 | + mov.l 5f, r9 | |
189 | + jsr @r9 | |
190 | + nop | |
191 | +#endif | |
187 | 192 | sti |
188 | 193 | bra system_call |
189 | 194 | nop |
... | ... | @@ -193,6 +198,9 @@ |
193 | 198 | 2: .long break_point_trap_software |
194 | 199 | 3: .long NR_syscalls |
195 | 200 | 4: .long sys_call_table |
201 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
202 | +5: .long trace_hardirqs_on | |
203 | +#endif | |
196 | 204 | |
197 | 205 | #if defined(CONFIG_SH_STANDARD_BIOS) |
198 | 206 | /* Unwind the stack and jmp to the debug entry */ |
... | ... | @@ -255,6 +263,11 @@ |
255 | 263 | |
256 | 264 | restore_all: |
257 | 265 | cli |
266 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
267 | + mov.l 3f, r0 | |
268 | + jsr @r0 | |
269 | + nop | |
270 | +#endif | |
258 | 271 | mov r15,r0 |
259 | 272 | mov.l $cpu_mode,r2 |
260 | 273 | mov #OFF_SR,r3 |
... | ... | @@ -307,6 +320,9 @@ |
307 | 320 | .long __current_thread_info |
308 | 321 | $cpu_mode: |
309 | 322 | .long __cpu_mode |
323 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
324 | +3: .long trace_hardirqs_off | |
325 | +#endif | |
310 | 326 | |
311 | 327 | ! common exception handler |
312 | 328 | #include "../../entry-common.S" |
arch/sh/kernel/cpu/sh3/entry.S
arch/sh/kernel/entry-common.S
... | ... | @@ -100,6 +100,11 @@ |
100 | 100 | .align 2 |
101 | 101 | ENTRY(exception_error) |
102 | 102 | ! |
103 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
104 | + mov.l 3f, r0 | |
105 | + jsr @r0 | |
106 | + nop | |
107 | +#endif | |
103 | 108 | sti |
104 | 109 | mov.l 2f, r0 |
105 | 110 | jmp @r0 |
106 | 111 | |
... | ... | @@ -109,10 +114,18 @@ |
109 | 114 | .align 2 |
110 | 115 | 1: .long break_point_trap_software |
111 | 116 | 2: .long do_exception_error |
117 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
118 | +3: .long trace_hardirqs_on | |
119 | +#endif | |
112 | 120 | |
113 | 121 | .align 2 |
114 | 122 | ret_from_exception: |
115 | 123 | preempt_stop() |
124 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
125 | + mov.l 4f, r0 | |
126 | + jsr @r0 | |
127 | + nop | |
128 | +#endif | |
116 | 129 | ENTRY(ret_from_irq) |
117 | 130 | ! |
118 | 131 | mov #OFF_SR, r0 |
... | ... | @@ -143,6 +156,11 @@ |
143 | 156 | mov.l 1f, r0 |
144 | 157 | mov.l r0, @(TI_PRE_COUNT,r8) |
145 | 158 | |
159 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
160 | + mov.l 3f, r0 | |
161 | + jsr @r0 | |
162 | + nop | |
163 | +#endif | |
146 | 164 | sti |
147 | 165 | mov.l 2f, r0 |
148 | 166 | jsr @r0 |
149 | 167 | |
... | ... | @@ -150,9 +168,15 @@ |
150 | 168 | mov #0, r0 |
151 | 169 | mov.l r0, @(TI_PRE_COUNT,r8) |
152 | 170 | cli |
171 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
172 | + mov.l 4f, r0 | |
173 | + jsr @r0 | |
174 | + nop | |
175 | +#endif | |
153 | 176 | |
154 | 177 | bra need_resched |
155 | 178 | nop |
179 | + | |
156 | 180 | noresched: |
157 | 181 | bra __restore_all |
158 | 182 | nop |
159 | 183 | |
160 | 184 | |
... | ... | @@ -160,11 +184,20 @@ |
160 | 184 | .align 2 |
161 | 185 | 1: .long PREEMPT_ACTIVE |
162 | 186 | 2: .long schedule |
187 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
188 | +3: .long trace_hardirqs_on | |
189 | +4: .long trace_hardirqs_off | |
163 | 190 | #endif |
191 | +#endif | |
164 | 192 | |
165 | 193 | ENTRY(resume_userspace) |
166 | 194 | ! r8: current_thread_info |
167 | 195 | cli |
196 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
197 | + mov.l 5f, r0 | |
198 | + jsr @r0 | |
199 | + nop | |
200 | +#endif | |
168 | 201 | mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags |
169 | 202 | tst #_TIF_WORK_MASK, r0 |
170 | 203 | bt/s __restore_all |
... | ... | @@ -210,6 +243,11 @@ |
210 | 243 | jsr @r1 ! schedule |
211 | 244 | nop |
212 | 245 | cli |
246 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
247 | + mov.l 5f, r0 | |
248 | + jsr @r0 | |
249 | + nop | |
250 | +#endif | |
213 | 251 | ! |
214 | 252 | mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags |
215 | 253 | tst #_TIF_WORK_MASK, r0 |
... | ... | @@ -221,6 +259,10 @@ |
221 | 259 | 1: .long schedule |
222 | 260 | 2: .long do_notify_resume |
223 | 261 | 3: .long restore_all |
262 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
263 | +4: .long trace_hardirqs_on | |
264 | +5: .long trace_hardirqs_off | |
265 | +#endif | |
224 | 266 | |
225 | 267 | .align 2 |
226 | 268 | syscall_exit_work: |
... | ... | @@ -229,6 +271,11 @@ |
229 | 271 | tst #_TIF_SYSCALL_TRACE, r0 |
230 | 272 | bt/s work_pending |
231 | 273 | tst #_TIF_NEED_RESCHED, r0 |
274 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
275 | + mov.l 5f, r0 | |
276 | + jsr @r0 | |
277 | + nop | |
278 | +#endif | |
232 | 279 | sti |
233 | 280 | ! XXX setup arguments... |
234 | 281 | mov.l 4f, r0 ! do_syscall_trace |
... | ... | @@ -265,7 +312,7 @@ |
265 | 312 | mov.l r0, @(OFF_R0,r15) ! Return value |
266 | 313 | |
267 | 314 | __restore_all: |
268 | - mov.l 1f,r0 | |
315 | + mov.l 1f, r0 | |
269 | 316 | jmp @r0 |
270 | 317 | nop |
271 | 318 | |
272 | 319 | |
... | ... | @@ -331,7 +378,13 @@ |
331 | 378 | mov #OFF_TRA, r9 |
332 | 379 | add r15, r9 |
333 | 380 | mov.l r8, @r9 ! set TRA value to tra |
381 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
382 | + mov.l 5f, r10 | |
383 | + jsr @r10 | |
384 | + nop | |
385 | +#endif | |
334 | 386 | sti |
387 | + | |
335 | 388 | ! |
336 | 389 | get_current_thread_info r8, r10 |
337 | 390 | mov.l @(TI_FLAGS,r8), r8 |
... | ... | @@ -355,6 +408,11 @@ |
355 | 408 | ! |
356 | 409 | syscall_exit: |
357 | 410 | cli |
411 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
412 | + mov.l 6f, r0 | |
413 | + jsr @r0 | |
414 | + nop | |
415 | +#endif | |
358 | 416 | ! |
359 | 417 | get_current_thread_info r8, r0 |
360 | 418 | mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags |
... | ... | @@ -369,4 +427,8 @@ |
369 | 427 | 2: .long NR_syscalls |
370 | 428 | 3: .long sys_call_table |
371 | 429 | 4: .long do_syscall_trace |
430 | +#ifdef CONFIG_TRACE_IRQFLAGS | |
431 | +5: .long trace_hardirqs_on | |
432 | +6: .long trace_hardirqs_off | |
433 | +#endif |
arch/sh/kernel/stacktrace.c
1 | +/* | |
2 | + * arch/sh/kernel/stacktrace.c | |
3 | + * | |
4 | + * Stack trace management functions | |
5 | + * | |
6 | + * Copyright (C) 2006 Paul Mundt | |
7 | + * | |
8 | + * This file is subject to the terms and conditions of the GNU General Public | |
9 | + * License. See the file "COPYING" in the main directory of this archive | |
10 | + * for more details. | |
11 | + */ | |
12 | +#include <linux/sched.h> | |
13 | +#include <linux/stacktrace.h> | |
14 | +#include <linux/thread_info.h> | |
15 | +#include <asm/ptrace.h> | |
16 | + | |
17 | +/* | |
18 | + * Save stack-backtrace addresses into a stack_trace buffer. | |
19 | + */ | |
20 | +void save_stack_trace(struct stack_trace *trace, struct task_struct *task) | |
21 | +{ | |
22 | + unsigned long *sp; | |
23 | + | |
24 | + if (!task) | |
25 | + task = current; | |
26 | + if (task == current) | |
27 | + sp = (unsigned long *)current_stack_pointer; | |
28 | + else | |
29 | + sp = (unsigned long *)task->thread.sp; | |
30 | + | |
31 | + while (!kstack_end(sp)) { | |
32 | + unsigned long addr = *sp++; | |
33 | + | |
34 | + if (__kernel_text_address(addr)) { | |
35 | + if (trace->skip > 0) | |
36 | + trace->skip--; | |
37 | + else | |
38 | + trace->entries[trace->nr_entries++] = addr; | |
39 | + if (trace->nr_entries >= trace->max_entries) | |
40 | + break; | |
41 | + } | |
42 | + } | |
43 | +} |
arch/sh/mm/fault.c
include/asm-sh/irqflags.h
1 | +#ifndef __ASM_SH_IRQFLAGS_H | |
2 | +#define __ASM_SH_IRQFLAGS_H | |
3 | + | |
4 | +static inline void raw_local_irq_enable(void) | |
5 | +{ | |
6 | + unsigned long __dummy0, __dummy1; | |
7 | + | |
8 | + __asm__ __volatile__ ( | |
9 | + "stc sr, %0\n\t" | |
10 | + "and %1, %0\n\t" | |
11 | +#ifdef CONFIG_CPU_HAS_SR_RB | |
12 | + "stc r6_bank, %1\n\t" | |
13 | + "or %1, %0\n\t" | |
14 | +#endif | |
15 | + "ldc %0, sr\n\t" | |
16 | + : "=&r" (__dummy0), "=r" (__dummy1) | |
17 | + : "1" (~0x000000f0) | |
18 | + : "memory" | |
19 | + ); | |
20 | +} | |
21 | + | |
22 | +static inline void raw_local_irq_disable(void) | |
23 | +{ | |
24 | + unsigned long flags; | |
25 | + | |
26 | + __asm__ __volatile__ ( | |
27 | + "stc sr, %0\n\t" | |
28 | + "or #0xf0, %0\n\t" | |
29 | + "ldc %0, sr\n\t" | |
30 | + : "=&z" (flags) | |
31 | + : /* no inputs */ | |
32 | + : "memory" | |
33 | + ); | |
34 | +} | |
35 | + | |
36 | +static inline void set_bl_bit(void) | |
37 | +{ | |
38 | + unsigned long __dummy0, __dummy1; | |
39 | + | |
40 | + __asm__ __volatile__ ( | |
41 | + "stc sr, %0\n\t" | |
42 | + "or %2, %0\n\t" | |
43 | + "and %3, %0\n\t" | |
44 | + "ldc %0, sr\n\t" | |
45 | + : "=&r" (__dummy0), "=r" (__dummy1) | |
46 | + : "r" (0x10000000), "r" (0xffffff0f) | |
47 | + : "memory" | |
48 | + ); | |
49 | +} | |
50 | + | |
51 | +static inline void clear_bl_bit(void) | |
52 | +{ | |
53 | + unsigned long __dummy0, __dummy1; | |
54 | + | |
55 | + __asm__ __volatile__ ( | |
56 | + "stc sr, %0\n\t" | |
57 | + "and %2, %0\n\t" | |
58 | + "ldc %0, sr\n\t" | |
59 | + : "=&r" (__dummy0), "=r" (__dummy1) | |
60 | + : "1" (~0x10000000) | |
61 | + : "memory" | |
62 | + ); | |
63 | +} | |
64 | + | |
65 | +static inline unsigned long __raw_local_save_flags(void) | |
66 | +{ | |
67 | + unsigned long flags; | |
68 | + | |
69 | + __asm__ __volatile__ ( | |
70 | + "stc sr, %0\n\t" | |
71 | + "and #0xf0, %0\n\t" | |
72 | + : "=&z" (flags) | |
73 | + : /* no inputs */ | |
74 | + : "memory" | |
75 | + ); | |
76 | + | |
77 | + return flags; | |
78 | +} | |
79 | + | |
80 | +#define raw_local_save_flags(flags) \ | |
81 | + do { (flags) = __raw_local_save_flags(); } while (0) | |
82 | + | |
83 | +static inline int raw_irqs_disabled_flags(unsigned long flags) | |
84 | +{ | |
85 | + return (flags != 0); | |
86 | +} | |
87 | + | |
88 | +static inline int raw_irqs_disabled(void) | |
89 | +{ | |
90 | + unsigned long flags = __raw_local_save_flags(); | |
91 | + | |
92 | + return raw_irqs_disabled_flags(flags); | |
93 | +} | |
94 | + | |
95 | +static inline unsigned long __raw_local_irq_save(void) | |
96 | +{ | |
97 | + unsigned long flags, __dummy; | |
98 | + | |
99 | + __asm__ __volatile__ ( | |
100 | + "stc sr, %1\n\t" | |
101 | + "mov %1, %0\n\t" | |
102 | + "or #0xf0, %0\n\t" | |
103 | + "ldc %0, sr\n\t" | |
104 | + "mov %1, %0\n\t" | |
105 | + "and #0xf0, %0\n\t" | |
106 | + : "=&z" (flags), "=&r" (__dummy) | |
107 | + : /* no inputs */ | |
108 | + : "memory" | |
109 | + ); | |
110 | + | |
111 | + return flags; | |
112 | +} | |
113 | + | |
114 | +#define raw_local_irq_save(flags) \ | |
115 | + do { (flags) = __raw_local_irq_save(); } while (0) | |
116 | + | |
117 | +static inline void raw_local_irq_restore(unsigned long flags) | |
118 | +{ | |
119 | + if ((flags & 0xf0) != 0xf0) | |
120 | + raw_local_irq_enable(); | |
121 | +} | |
122 | + | |
123 | +#endif /* __ASM_SH_IRQFLAGS_H */ |
include/asm-sh/rwsem.h
... | ... | @@ -25,11 +25,21 @@ |
25 | 25 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
26 | 26 | spinlock_t wait_lock; |
27 | 27 | struct list_head wait_list; |
28 | +#ifdef CONFIG_DEBUG_LOCK_ALLOC | |
29 | + struct lockdep_map dep_map; | |
30 | +#endif | |
28 | 31 | }; |
29 | 32 | |
33 | +#ifdef CONFIG_DEBUG_LOCK_ALLOC | |
34 | +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } | |
35 | +#else | |
36 | +# define __RWSEM_DEP_MAP_INIT(lockname) | |
37 | +#endif | |
38 | + | |
30 | 39 | #define __RWSEM_INITIALIZER(name) \ |
31 | 40 | { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ |
32 | - LIST_HEAD_INIT((name).wait_list) } | |
41 | + LIST_HEAD_INIT((name).wait_list) \ | |
42 | + __RWSEM_DEP_MAP_INIT(name) } | |
33 | 43 | |
34 | 44 | #define DECLARE_RWSEM(name) \ |
35 | 45 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) |
... | ... | @@ -39,6 +49,16 @@ |
39 | 49 | extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); |
40 | 50 | extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); |
41 | 51 | |
52 | +extern void __init_rwsem(struct rw_semaphore *sem, const char *name, | |
53 | + struct lock_class_key *key); | |
54 | + | |
55 | +#define init_rwsem(sem) \ | |
56 | +do { \ | |
57 | + static struct lock_class_key __key; \ | |
58 | + \ | |
59 | + __init_rwsem((sem), #sem, &__key); \ | |
60 | +} while (0) | |
61 | + | |
42 | 62 | static inline void init_rwsem(struct rw_semaphore *sem) |
43 | 63 | { |
44 | 64 | sem->count = RWSEM_UNLOCKED_VALUE; |
... | ... | @@ -139,6 +159,11 @@ |
139 | 159 | tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count)); |
140 | 160 | if (tmp < 0) |
141 | 161 | rwsem_downgrade_wake(sem); |
162 | +} | |
163 | + | |
164 | +static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) | |
165 | +{ | |
166 | + __down_write(sem); | |
142 | 167 | } |
143 | 168 | |
144 | 169 | /* |
include/asm-sh/system.h
... | ... | @@ -6,6 +6,7 @@ |
6 | 6 | * Copyright (C) 2002 Paul Mundt |
7 | 7 | */ |
8 | 8 | |
9 | +#include <linux/irqflags.h> | |
9 | 10 | #include <asm/types.h> |
10 | 11 | |
11 | 12 | /* |
... | ... | @@ -131,103 +132,6 @@ |
131 | 132 | |
132 | 133 | #define set_mb(var, value) do { xchg(&var, value); } while (0) |
133 | 134 | |
134 | -/* Interrupt Control */ | |
135 | -#ifdef CONFIG_CPU_HAS_SR_RB | |
136 | -static inline void local_irq_enable(void) | |
137 | -{ | |
138 | - unsigned long __dummy0, __dummy1; | |
139 | - | |
140 | - __asm__ __volatile__("stc sr, %0\n\t" | |
141 | - "and %1, %0\n\t" | |
142 | - "stc r6_bank, %1\n\t" | |
143 | - "or %1, %0\n\t" | |
144 | - "ldc %0, sr" | |
145 | - : "=&r" (__dummy0), "=r" (__dummy1) | |
146 | - : "1" (~0x000000f0) | |
147 | - : "memory"); | |
148 | -} | |
149 | -#else | |
150 | -static inline void local_irq_enable(void) | |
151 | -{ | |
152 | - unsigned long __dummy0, __dummy1; | |
153 | - | |
154 | - __asm__ __volatile__ ( | |
155 | - "stc sr, %0\n\t" | |
156 | - "and %1, %0\n\t" | |
157 | - "ldc %0, sr\n\t" | |
158 | - : "=&r" (__dummy0), "=r" (__dummy1) | |
159 | - : "1" (~0x000000f0) | |
160 | - : "memory"); | |
161 | -} | |
162 | -#endif | |
163 | - | |
164 | -static inline void local_irq_disable(void) | |
165 | -{ | |
166 | - unsigned long __dummy; | |
167 | - __asm__ __volatile__("stc sr, %0\n\t" | |
168 | - "or #0xf0, %0\n\t" | |
169 | - "ldc %0, sr" | |
170 | - : "=&z" (__dummy) | |
171 | - : /* no inputs */ | |
172 | - : "memory"); | |
173 | -} | |
174 | - | |
175 | -static inline void set_bl_bit(void) | |
176 | -{ | |
177 | - unsigned long __dummy0, __dummy1; | |
178 | - | |
179 | - __asm__ __volatile__ ("stc sr, %0\n\t" | |
180 | - "or %2, %0\n\t" | |
181 | - "and %3, %0\n\t" | |
182 | - "ldc %0, sr" | |
183 | - : "=&r" (__dummy0), "=r" (__dummy1) | |
184 | - : "r" (0x10000000), "r" (0xffffff0f) | |
185 | - : "memory"); | |
186 | -} | |
187 | - | |
188 | -static inline void clear_bl_bit(void) | |
189 | -{ | |
190 | - unsigned long __dummy0, __dummy1; | |
191 | - | |
192 | - __asm__ __volatile__ ("stc sr, %0\n\t" | |
193 | - "and %2, %0\n\t" | |
194 | - "ldc %0, sr" | |
195 | - : "=&r" (__dummy0), "=r" (__dummy1) | |
196 | - : "1" (~0x10000000) | |
197 | - : "memory"); | |
198 | -} | |
199 | - | |
200 | -#define local_save_flags(x) \ | |
201 | - __asm__("stc sr, %0; and #0xf0, %0" : "=&z" (x) :/**/: "memory" ) | |
202 | - | |
203 | -#define irqs_disabled() \ | |
204 | -({ \ | |
205 | - unsigned long flags; \ | |
206 | - local_save_flags(flags); \ | |
207 | - (flags != 0); \ | |
208 | -}) | |
209 | - | |
210 | -static inline unsigned long local_irq_save(void) | |
211 | -{ | |
212 | - unsigned long flags, __dummy; | |
213 | - | |
214 | - __asm__ __volatile__("stc sr, %1\n\t" | |
215 | - "mov %1, %0\n\t" | |
216 | - "or #0xf0, %0\n\t" | |
217 | - "ldc %0, sr\n\t" | |
218 | - "mov %1, %0\n\t" | |
219 | - "and #0xf0, %0" | |
220 | - : "=&z" (flags), "=&r" (__dummy) | |
221 | - :/**/ | |
222 | - : "memory" ); | |
223 | - return flags; | |
224 | -} | |
225 | - | |
226 | -#define local_irq_restore(x) do { \ | |
227 | - if ((x & 0x000000f0) != 0x000000f0) \ | |
228 | - local_irq_enable(); \ | |
229 | -} while (0) | |
230 | - | |
231 | 135 | /* |
232 | 136 | * Jump to P2 area. |
233 | 137 | * When handling TLB or caches, we need to do it from P2 area. |
... | ... | @@ -263,9 +167,6 @@ |
263 | 167 | "2:" \ |
264 | 168 | : "=&r" (__dummy)); \ |
265 | 169 | } while (0) |
266 | - | |
267 | -/* For spinlocks etc */ | |
268 | -#define local_irq_save(x) x = local_irq_save() | |
269 | 170 | |
270 | 171 | static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) |
271 | 172 | { |