Commit 10e267234cc0133bc9ed26bc34eb09de90c248c0

Authored by David S. Miller
Committed by David S. Miller
1 parent af1713e0f1

[SPARC64]: Add irqtrace/stacktrace/lockdep support.

Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 13 changed files with 289 additions and 66 deletions

arch/sparc64/Kconfig
... ... @@ -26,6 +26,14 @@
26 26 bool
27 27 default y
28 28  
  29 +config STACKTRACE_SUPPORT
  30 + bool
  31 + default y
  32 +
  33 +config LOCKDEP_SUPPORT
  34 + bool
  35 + default y
  36 +
29 37 config TIME_INTERPOLATION
30 38 bool
31 39 default y
arch/sparc64/Kconfig.debug
1 1 menu "Kernel hacking"
2 2  
  3 +config TRACE_IRQFLAGS_SUPPORT
  4 + bool
  5 + default y
  6 +
3 7 source "lib/Kconfig.debug"
4 8  
5 9 config DEBUG_STACK_USAGE
arch/sparc64/kernel/Makefile
... ... @@ -14,6 +14,7 @@
14 14 power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \
15 15 visemul.o prom.o of_device.o
16 16  
  17 +obj-$(CONFIG_STACKTRACE) += stacktrace.o
17 18 obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \
18 19 pci_psycho.o pci_sabre.o pci_schizo.o \
19 20 pci_sun4v.o pci_sun4v_asm.o
arch/sparc64/kernel/entry.S
... ... @@ -597,7 +597,12 @@
597 597 1: ba,pt %xcc, etrap_irq
598 598 rd %pc, %g7
599 599  
600   -2: mov %l4, %o1
  600 +2:
  601 +#ifdef CONFIG_TRACE_IRQFLAGS
  602 + call trace_hardirqs_off
  603 + nop
  604 +#endif
  605 + mov %l4, %o1
601 606 mov %l5, %o2
602 607 call spitfire_access_error
603 608 add %sp, PTREGS_OFF, %o0
... ... @@ -824,6 +829,10 @@
824 829 wrpr %g0, 15, %pil
825 830 ba,pt %xcc, etrap_irq
826 831 rd %pc, %g7
  832 +#ifdef CONFIG_TRACE_IRQFLAGS
  833 + call trace_hardirqs_off
  834 + nop
  835 +#endif
827 836 mov 0x0, %o0
828 837 call cheetah_plus_parity_error
829 838 add %sp, PTREGS_OFF, %o1
... ... @@ -855,6 +864,10 @@
855 864 wrpr %g0, 15, %pil
856 865 ba,pt %xcc, etrap_irq
857 866 rd %pc, %g7
  867 +#ifdef CONFIG_TRACE_IRQFLAGS
  868 + call trace_hardirqs_off
  869 + nop
  870 +#endif
858 871 mov 0x1, %o0
859 872 call cheetah_plus_parity_error
860 873 add %sp, PTREGS_OFF, %o1
... ... @@ -1183,6 +1196,10 @@
1183 1196 wrpr %g0, 15, %pil
1184 1197 ba,pt %xcc, etrap_irq
1185 1198 rd %pc, %g7
  1199 +#ifdef CONFIG_TRACE_IRQFLAGS
  1200 + call trace_hardirqs_off
  1201 + nop
  1202 +#endif
1186 1203 mov %l4, %o1
1187 1204 mov %l5, %o2
1188 1205 call cheetah_fecc_handler
... ... @@ -1211,6 +1228,10 @@
1211 1228 wrpr %g0, 15, %pil
1212 1229 ba,pt %xcc, etrap_irq
1213 1230 rd %pc, %g7
  1231 +#ifdef CONFIG_TRACE_IRQFLAGS
  1232 + call trace_hardirqs_off
  1233 + nop
  1234 +#endif
1214 1235 mov %l4, %o1
1215 1236 mov %l5, %o2
1216 1237 call cheetah_cee_handler
... ... @@ -1239,6 +1260,10 @@
1239 1260 wrpr %g0, 15, %pil
1240 1261 ba,pt %xcc, etrap_irq
1241 1262 rd %pc, %g7
  1263 +#ifdef CONFIG_TRACE_IRQFLAGS
  1264 + call trace_hardirqs_off
  1265 + nop
  1266 +#endif
1242 1267 mov %l4, %o1
1243 1268 mov %l5, %o2
1244 1269 call cheetah_deferred_handler
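
The same three-instruction pattern is added after every etrap_irq call site in this file. The reason, modeled in C (a sketch; the externs stand in for assembly entry points and are not real C functions): etrap_irq raises %pil to 15 behind lockdep's back, so the hard-off state must be annotated before any C handler that may take locks runs.

#include <linux/irqflags.h>
#include <asm/ptrace.h>

extern void etrap_irq(void);	/* saves a trap frame, sets %pil = 15 */

static void error_trap_model(void (*handler)(struct pt_regs *),
			     struct pt_regs *regs)
{
	etrap_irq();		/* hardware: IRQs now hard-disabled */
	trace_hardirqs_off();	/* lockdep: bring its view in sync */
	handler(regs);		/* C code may now take spinlocks safely */
}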
arch/sparc64/kernel/head.S
... ... @@ -489,6 +489,14 @@
489 489 call __bzero
490 490 sub %o1, %o0, %o1
491 491  
  492 +#ifdef CONFIG_LOCKDEP
  493 + /* We have to call this super early, as even prom_init can grab
  494 + * spinlocks and thus call into the lockdep code.
  495 + */
  496 + call lockdep_init
  497 + nop
  498 +#endif
  499 +
492 500 mov %l6, %o1 ! OpenPROM stack
493 501 call prom_init
494 502 mov %l7, %o0 ! OpenPROM cif handler
arch/sparc64/kernel/rtrap.S
... ... @@ -165,14 +165,26 @@
165 165 __handle_softirq_continue:
166 166 rtrap_xcall:
167 167 sethi %hi(0xf << 20), %l4
168   - andcc %l1, TSTATE_PRIV, %l3
169 168 and %l1, %l4, %l4
  169 + andn %l1, %l4, %l1
  170 + srl %l4, 20, %l4
  171 +#ifdef CONFIG_TRACE_IRQFLAGS
  172 + brnz,pn %l4, rtrap_no_irq_enable
  173 + nop
  174 + call trace_hardirqs_on
  175 + nop
  176 + wrpr %l4, %pil
  177 +rtrap_no_irq_enable:
  178 +#endif
  179 + andcc %l1, TSTATE_PRIV, %l3
170 180 bne,pn %icc, to_kernel
171   - andn %l1, %l4, %l1
  181 + nop
172 182  
173 183 /* We must hold IRQs off and atomically test schedule+signal
174 184 * state, then hold them off all the way back to userspace.
175   - * If we are returning to kernel, none of this matters.
  185 + * If we are returning to kernel, none of this matters. Note
  186 + * that we are disabling interrupts via PSTATE_IE, not using
  187 + * %pil.
176 188 *
177 189 * If we do not do this, there is a window where we would do
178 190 * the tests, later the signal/resched event arrives but we do
... ... @@ -256,7 +268,6 @@
256 268  
257 269 ld [%sp + PTREGS_OFF + PT_V9_Y], %o3
258 270 wr %o3, %g0, %y
259   - srl %l4, 20, %l4
260 271 wrpr %l4, 0x0, %pil
261 272 wrpr %g0, 0x1, %tl
262 273 wrpr %l1, %g0, %tstate
... ... @@ -374,8 +385,8 @@
374 385 ldx [%g6 + TI_FLAGS], %l5
375 386 andcc %l5, _TIF_NEED_RESCHED, %g0
376 387 be,pt %xcc, kern_fpucheck
377   - srl %l4, 20, %l5
378   - cmp %l5, 0
  388 + nop
  389 + cmp %l4, 0
379 390 bne,pn %xcc, kern_fpucheck
380 391 sethi %hi(PREEMPT_ACTIVE), %l6
381 392 stw %l6, [%g6 + TI_PRE_COUNT]
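
A sketch of the reworked logic above (assumption, not from the commit): the PIL field is now extracted from TSTATE into %l4 and shifted down early, so the trace_hardirqs_on annotation can fire before %pil is actually lowered; this is also why the later srl was dropped and the kernel-preemption check now compares %l4 directly. In C terms, with write_pil() as a hypothetical stand-in for wrpr ... %pil:

#include <linux/irqflags.h>

extern void write_pil(unsigned long pil);	/* hypothetical: wrpr %pil */

static unsigned long rtrap_pil_model(unsigned long tstate)
{
	unsigned long pil = (tstate >> 20) & 0xf;	/* what %l4 now holds */

	if (pil == 0) {			/* returning with IRQs enabled */
		trace_hardirqs_on();	/* annotate before enabling */
		write_pil(pil);
	}
	return pil;	/* reused by wrpr %l4, %pil and the cmp %l4, 0 */
}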
arch/sparc64/kernel/stacktrace.c
  1 +#include <linux/sched.h>
  2 +#include <linux/stacktrace.h>
  3 +#include <linux/thread_info.h>
  4 +#include <asm/ptrace.h>
  5 +
  6 +void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
  7 +{
  8 + unsigned long ksp, fp, thread_base;
  9 + struct thread_info *tp;
  10 +
  11 + if (!task)
  12 + task = current;
  13 + tp = task_thread_info(task);
  14 + if (task == current) {
  15 + flushw_all();
  16 + __asm__ __volatile__(
  17 + "mov %%fp, %0"
  18 + : "=r" (ksp)
  19 + );
  20 + } else
  21 + ksp = tp->ksp;
  22 +
  23 + fp = ksp + STACK_BIAS;
  24 + thread_base = (unsigned long) tp;
  25 + do {
  26 + struct reg_window *rw;
  27 +
  28 + /* Bogus frame pointer? */
  29 + if (fp < (thread_base + sizeof(struct thread_info)) ||
  30 + fp >= (thread_base + THREAD_SIZE))
  31 + break;
  32 +
  33 + rw = (struct reg_window *) fp;
  34 + if (trace->skip > 0)
  35 + trace->skip--;
  36 + else
  37 + trace->entries[trace->nr_entries++] = rw->ins[7];
  38 +
  39 + fp = rw->ins[6] + STACK_BIAS;
  40 + } while (trace->nr_entries < trace->max_entries);
  41 +}
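
A minimal usage sketch, not part of this commit: driving the walker above to capture the current task's backtrace. The struct stack_trace fields are the ones this code reads (entries, nr_entries, max_entries, skip); dump_current_stack() is a hypothetical helper.

#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void dump_current_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
		.skip		= 1,	/* drop this helper's own frame */
	};
	unsigned int i;

	save_stack_trace(&trace, NULL);	/* NULL walks "current" */
	for (i = 0; i < trace.nr_entries; i++)
		printk(KERN_DEBUG "  [<%016lx>]\n", trace.entries[i]);
}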
arch/sparc64/kernel/sun4v_ivec.S
... ... @@ -190,7 +190,10 @@
190 190 mov %g1, %g4
191 191 ba,pt %xcc, etrap_irq
192 192 rd %pc, %g7
193   -
  193 +#ifdef CONFIG_TRACE_IRQFLAGS
  194 + call trace_hardirqs_off
  195 + nop
  196 +#endif
194 197 /* Log the event. */
195 198 add %sp, PTREGS_OFF, %o0
196 199 call sun4v_resum_error
... ... @@ -216,7 +219,10 @@
216 219 wrpr %g0, 15, %pil
217 220 ba,pt %xcc, etrap_irq
218 221 rd %pc, %g7
219   -
  222 +#ifdef CONFIG_TRACE_IRQFLAGS
  223 + call trace_hardirqs_off
  224 + nop
  225 +#endif
220 226 call sun4v_resum_overflow
221 227 add %sp, PTREGS_OFF, %o0
222 228  
... ... @@ -295,7 +301,10 @@
295 301 mov %g1, %g4
296 302 ba,pt %xcc, etrap_irq
297 303 rd %pc, %g7
298   -
  304 +#ifdef CONFIG_TRACE_IRQFLAGS
  305 + call trace_hardirqs_off
  306 + nop
  307 +#endif
299 308 /* Log the event. */
300 309 add %sp, PTREGS_OFF, %o0
301 310 call sun4v_nonresum_error
... ... @@ -321,7 +330,10 @@
321 330 wrpr %g0, 15, %pil
322 331 ba,pt %xcc, etrap_irq
323 332 rd %pc, %g7
324   -
  333 +#ifdef CONFIG_TRACE_IRQFLAGS
  334 + call trace_hardirqs_off
  335 + nop
  336 +#endif
325 337 call sun4v_nonresum_overflow
326 338 add %sp, PTREGS_OFF, %o0
327 339  
arch/sparc64/mm/ultra.S
... ... @@ -477,6 +477,10 @@
477 477 sethi %hi(109f), %g7
478 478 b,pt %xcc, etrap_irq
479 479 109: or %g7, %lo(109b), %g7
  480 +#ifdef CONFIG_TRACE_IRQFLAGS
  481 + call trace_hardirqs_off
  482 + nop
  483 +#endif
480 484 call smp_synchronize_tick_client
481 485 nop
482 486 clr %l6
... ... @@ -508,6 +512,10 @@
508 512 sethi %hi(109f), %g7
509 513 b,pt %xcc, etrap_irq
510 514 109: or %g7, %lo(109b), %g7
  515 +#ifdef CONFIG_TRACE_IRQFLAGS
  516 + call trace_hardirqs_off
  517 + nop
  518 +#endif
511 519 call __show_regs
512 520 add %sp, PTREGS_OFF, %o0
513 521 clr %l6
include/asm-sparc64/irqflags.h
  1 +/*
  2 + * include/asm-sparc64/irqflags.h
  3 + *
  4 + * IRQ flags handling
  5 + *
  6 + * This file gets included from lowlevel asm headers too, to provide
  7 + * wrapped versions of the local_irq_*() APIs, based on the
  8 + * raw_local_irq_*() functions from the lowlevel headers.
  9 + */
  10 +#ifndef _ASM_IRQFLAGS_H
  11 +#define _ASM_IRQFLAGS_H
  12 +
  13 +#ifndef __ASSEMBLY__
  14 +
  15 +static inline unsigned long __raw_local_save_flags(void)
  16 +{
  17 + unsigned long flags;
  18 +
  19 + __asm__ __volatile__(
  20 + "rdpr %%pil, %0"
  21 + : "=r" (flags)
  22 + );
  23 +
  24 + return flags;
  25 +}
  26 +
  27 +#define raw_local_save_flags(flags) \
  28 + do { (flags) = __raw_local_save_flags(); } while (0)
  29 +
  30 +static inline void raw_local_irq_restore(unsigned long flags)
  31 +{
  32 + __asm__ __volatile__(
  33 + "wrpr %0, %%pil"
  34 + : /* no output */
  35 + : "r" (flags)
  36 + : "memory"
  37 + );
  38 +}
  39 +
  40 +static inline void raw_local_irq_disable(void)
  41 +{
  42 + __asm__ __volatile__(
  43 + "wrpr 15, %%pil"
  44 + : /* no outputs */
  45 + : /* no inputs */
  46 + : "memory"
  47 + );
  48 +}
  49 +
  50 +static inline void raw_local_irq_enable(void)
  51 +{
  52 + __asm__ __volatile__(
  53 + "wrpr 0, %%pil"
  54 + : /* no outputs */
  55 + : /* no inputs */
  56 + : "memory"
  57 + );
  58 +}
  59 +
  60 +static inline int raw_irqs_disabled_flags(unsigned long flags)
  61 +{
  62 + return (flags > 0);
  63 +}
  64 +
  65 +static inline int raw_irqs_disabled(void)
  66 +{
  67 + unsigned long flags = __raw_local_save_flags();
  68 +
  69 + return raw_irqs_disabled_flags(flags);
  70 +}
  71 +
  72 +/*
  73 + * For spinlocks, etc:
  74 + */
  75 +static inline unsigned long __raw_local_irq_save(void)
  76 +{
  77 + unsigned long flags = __raw_local_save_flags();
  78 +
  79 + raw_local_irq_disable();
  80 +
  81 + return flags;
  82 +}
  83 +
  84 +#define raw_local_irq_save(flags) \
  85 + do { (flags) = __raw_local_irq_save(); } while (0)
  86 +
  87 +#endif /* (__ASSEMBLY__) */
  88 +
  89 +#endif /* !(_ASM_IRQFLAGS_H) */
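
For context, a sketch of how the generic layer composes these primitives once CONFIG_TRACE_IRQFLAGS is set (roughly what include/linux/irqflags.h expands to; not part of this commit). On sparc64 the flags word is the PIL: zero means all interrupt levels enabled, any non-zero value masks levels at or below it, hence raw_irqs_disabled_flags() tests flags > 0.

#define local_irq_save(flags)			\
	do {					\
		raw_local_irq_save(flags);	\
		trace_hardirqs_off();		\
	} while (0)

#define local_irq_enable()			\
	do {					\
		trace_hardirqs_on();		\
		raw_local_irq_enable();		\
	} while (0)

Note the ordering: the "off" annotation happens after interrupts are really disabled, and the "on" annotation before they are re-enabled, so lockdep never sees a window where its view is more permissive than the hardware's.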
include/asm-sparc64/rwsem.h
... ... @@ -23,21 +23,34 @@
23 23 signed int count;
24 24 spinlock_t wait_lock;
25 25 struct list_head wait_list;
  26 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
  27 + struct lockdep_map dep_map;
  28 +#endif
26 29 };
27 30  
  31 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
  32 +# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
  33 +#else
  34 +# define __RWSEM_DEP_MAP_INIT(lockname)
  35 +#endif
  36 +
28 37 #define __RWSEM_INITIALIZER(name) \
29   -{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }
  38 +{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
  39 + __RWSEM_DEP_MAP_INIT(name) }
30 40  
31 41 #define DECLARE_RWSEM(name) \
32 42 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
33 43  
34   -static __inline__ void init_rwsem(struct rw_semaphore *sem)
35   -{
36   - sem->count = RWSEM_UNLOCKED_VALUE;
37   - spin_lock_init(&sem->wait_lock);
38   - INIT_LIST_HEAD(&sem->wait_list);
39   -}
  44 +extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
  45 + struct lock_class_key *key);
40 46  
  47 +#define init_rwsem(sem) \
  48 +do { \
  49 + static struct lock_class_key __key; \
  50 + \
  51 + __init_rwsem((sem), #sem, &__key); \
  52 +} while (0)
  53 +
41 54 extern void __down_read(struct rw_semaphore *sem);
42 55 extern int __down_read_trylock(struct rw_semaphore *sem);
43 56 extern void __down_write(struct rw_semaphore *sem);
... ... @@ -45,6 +58,11 @@
45 58 extern void __up_read(struct rw_semaphore *sem);
46 59 extern void __up_write(struct rw_semaphore *sem);
47 60 extern void __downgrade_write(struct rw_semaphore *sem);
  61 +
  62 +static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
  63 +{
  64 + __down_write(sem);
  65 +}
48 66  
49 67 static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
50 68 {
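
A sketch of what the new init_rwsem() does at a call site (frob_sem is a hypothetical lock): each use of the macro gets its own static lock_class_key, whose address is what lockdep uses to classify the semaphore.

static struct rw_semaphore frob_sem;

static void frob_init(void)
{
	/* init_rwsem(&frob_sem) expands to roughly: */
	static struct lock_class_key __key;	/* unique per call site */

	__init_rwsem(&frob_sem, "&frob_sem", &__key);
}

This is why init_rwsem() had to become a macro: a static key inside an inline function could not give each call site its own identity, and every rwsem would collapse into a single lockdep class.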
include/asm-sparc64/system.h
... ... @@ -7,6 +7,9 @@
7 7 #include <asm/visasm.h>
8 8  
9 9 #ifndef __ASSEMBLY__
  10 +
  11 +#include <linux/irqflags.h>
  12 +
10 13 /*
11 14 * Sparc (general) CPU types
12 15 */
... ... @@ -71,52 +74,6 @@
71 74 membar_safe("#StoreStore | #LoadStore")
72 75  
73 76 #endif
74   -
75   -#define setipl(__new_ipl) \
76   - __asm__ __volatile__("wrpr %0, %%pil" : : "r" (__new_ipl) : "memory")
77   -
78   -#define local_irq_disable() \
79   - __asm__ __volatile__("wrpr 15, %%pil" : : : "memory")
80   -
81   -#define local_irq_enable() \
82   - __asm__ __volatile__("wrpr 0, %%pil" : : : "memory")
83   -
84   -#define getipl() \
85   -({ unsigned long retval; __asm__ __volatile__("rdpr %%pil, %0" : "=r" (retval)); retval; })
86   -
87   -#define swap_pil(__new_pil) \
88   -({ unsigned long retval; \
89   - __asm__ __volatile__("rdpr %%pil, %0\n\t" \
90   - "wrpr %1, %%pil" \
91   - : "=&r" (retval) \
92   - : "r" (__new_pil) \
93   - : "memory"); \
94   - retval; \
95   -})
96   -
97   -#define read_pil_and_cli() \
98   -({ unsigned long retval; \
99   - __asm__ __volatile__("rdpr %%pil, %0\n\t" \
100   - "wrpr 15, %%pil" \
101   - : "=r" (retval) \
102   - : : "memory"); \
103   - retval; \
104   -})
105   -
106   -#define local_save_flags(flags) ((flags) = getipl())
107   -#define local_irq_save(flags) ((flags) = read_pil_and_cli())
108   -#define local_irq_restore(flags) setipl((flags))
109   -
110   -/* On sparc64 IRQ flags are the PIL register. A value of zero
111   - * means all interrupt levels are enabled, any other value means
112   - * only IRQ levels greater than that value will be received.
113   - * Consequently this means that the lowest IRQ level is one.
114   - */
115   -#define irqs_disabled() \
116   -({ unsigned long flags; \
117   - local_save_flags(flags);\
118   - (flags > 0); \
119   -})
120 77  
121 78 #define nop() __asm__ __volatile__ ("nop")
122 79  
include/asm-sparc64/ttable.h
... ... @@ -137,10 +137,49 @@
137 137 #endif
138 138 #define BREAKPOINT_TRAP TRAP(breakpoint_trap)
139 139  
  140 +#ifdef CONFIG_TRACE_IRQFLAGS
  141 +
140 142 #define TRAP_IRQ(routine, level) \
141 143 rdpr %pil, %g2; \
142 144 wrpr %g0, 15, %pil; \
143   - b,pt %xcc, etrap_irq; \
  145 + sethi %hi(1f-4), %g7; \
  146 + ba,pt %xcc, etrap_irq; \
  147 + or %g7, %lo(1f-4), %g7; \
  148 + nop; \
  149 + nop; \
  150 + nop; \
  151 + .subsection 2; \
  152 +1: call trace_hardirqs_off; \
  153 + nop; \
  154 + mov level, %o0; \
  155 + call routine; \
  156 + add %sp, PTREGS_OFF, %o1; \
  157 + ba,a,pt %xcc, rtrap_irq; \
  158 + .previous;
  159 +
  160 +#define TICK_SMP_IRQ \
  161 + rdpr %pil, %g2; \
  162 + wrpr %g0, 15, %pil; \
  163 + sethi %hi(1f-4), %g7; \
  164 + ba,pt %xcc, etrap_irq; \
  165 + or %g7, %lo(1f-4), %g7; \
  166 + nop; \
  167 + nop; \
  168 + nop; \
  169 + .subsection 2; \
  170 +1: call trace_hardirqs_off; \
  171 + nop; \
  172 + call smp_percpu_timer_interrupt; \
  173 + add %sp, PTREGS_OFF, %o0; \
  174 + ba,a,pt %xcc, rtrap_irq; \
  175 + .previous;
  176 +
  177 +#else
  178 +
  179 +#define TRAP_IRQ(routine, level) \
  180 + rdpr %pil, %g2; \
  181 + wrpr %g0, 15, %pil; \
  182 + ba,pt %xcc, etrap_irq; \
144 183 rd %pc, %g7; \
145 184 mov level, %o0; \
146 185 call routine; \
147 186  
... ... @@ -151,11 +190,13 @@
151 190 rdpr %pil, %g2; \
152 191 wrpr %g0, 15, %pil; \
153 192 sethi %hi(109f), %g7; \
154   - b,pt %xcc, etrap_irq; \
  193 + ba,pt %xcc, etrap_irq; \
155 194 109: or %g7, %lo(109b), %g7; \
156 195 call smp_percpu_timer_interrupt; \
157 196 add %sp, PTREGS_OFF, %o0; \
158 197 ba,a,pt %xcc, rtrap_irq;
  198 +
  199 +#endif
159 200  
160 201 #define TRAP_IVEC TRAP_NOSAVE(do_ivec)
161 202
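
Finally, a rough model of why the traced TRAP_IRQ needs the .subsection 2 trampoline (a sketch with hypothetical stand-ins for the assembly paths): a sparc64 trap-table slot holds only eight instructions, so the lockdep call and the dispatch are parked out of line, and %g7 is set to 1f-4 because etrap_irq resumes execution at %g7 + 4, i.e. exactly at the 1: label.

#include <linux/irqflags.h>
#include <asm/ptrace.h>

extern void etrap_irq(void);	/* assembly entry: trap frame, %pil = 15 */
extern void rtrap_irq(void);	/* assembly exit: restore and return */

static void trap_irq_model(void (*routine)(int, struct pt_regs *),
			   int level, struct pt_regs *regs)
{
	etrap_irq();		/* the in-slot part of the macro */
	trace_hardirqs_off();	/* start of the out-of-line 1: code */
	routine(level, regs);
	rtrap_irq();
}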