Commit 954fbc8985328a3b59b5881243d3aa04a8f8da7c
1 parent: 3f6c148df4
Exists in master and in 39 other branches
sparc64: Implement perf_arch_fetch_caller_regs
We provide regs->tstate, regs->tpc, regs->tnpc and regs->u_regs[UREG_FP].

regs->tstate is necessary for:

	user_mode() (via perf_exclude_event())
	perf_misc_flags() (via perf_prepare_sample())

regs->tpc is necessary for:

	perf_instruction_pointer() (via perf_prepare_sample())

and regs->u_regs[UREG_FP] is necessary for:

	perf_callchain() (via perf_prepare_sample())

The regs->tnpc value is provided just to be tidy.

Signed-off-by: David S. Miller <davem@davemloft.net>
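As a quick orientation before the diff, the sketch below (illustrative only, not code from this commit) models the consumer side the list above refers to: generic perf code hands the helper a pt_regs to fill plus a skip count, and the sampling path then reads back exactly the fields enumerated in the commit message. The perf_fetch_caller_regs() wrapper and its skip argument are assumptions about the generic perf API of this era, and the example function name is made up for illustration.

	/*
	 * Illustrative sketch only -- not part of this commit.  It models
	 * how the pt_regs filled by the new helper gets consumed.  The
	 * perf_fetch_caller_regs() call with a skip argument is an
	 * assumption about the generic perf API of this era.
	 */
	#include <linux/perf_event.h>
	#include <asm/ptrace.h>

	static void sample_from_caller_regs(void)	/* hypothetical example */
	{
		struct pt_regs regs;

		/* Ends up in the new sparc64 asm helper, with %o0 = &regs
		 * and the skip count in %o2.
		 */
		perf_fetch_caller_regs(&regs, 1);

		if (user_mode(&regs))			/* reads regs.tstate         */
			return;
		(void)instruction_pointer(&regs);	/* reads regs.tpc            */
		(void)regs.u_regs[UREG_FP];		/* start of the frame walk
							 * done by perf_callchain() */
	}

On sparc64, user_mode() checks TSTATE_PRIV in tstate and instruction_pointer() is simply tpc, which is why those two fields plus the frame pointer are the minimum this helper has to provide.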
Showing 1 changed file with 75 additions and 0 deletions
arch/sparc/kernel/helpers.S
@@ -46,6 +46,81 @@
 	 nop
 	.size		stack_trace_flush,.-stack_trace_flush
 
+#ifdef CONFIG_PERF_EVENTS
+	.globl		perf_arch_fetch_caller_regs
+	.type		perf_arch_fetch_caller_regs,#function
+perf_arch_fetch_caller_regs:
+	/* We always read the %pstate into %o5 since we will use
+	 * that to construct a fake %tstate to store into the regs.
+	 */
+	rdpr		%pstate, %o5
+	brz,pn		%o2, 50f
+	 mov		%o2, %g7
+
+	/* Turn off interrupts while we walk around the register
+	 * window by hand.
+	 */
+	wrpr		%o5, PSTATE_IE, %pstate
+
+	/* The %canrestore tells us how many register windows are
+	 * still live in the chip above us, past that we have to
+	 * walk the frame as saved on the stack.  We stash away
+	 * the %cwp in %g1 so we can return back to the original
+	 * register window.
+	 */
+	rdpr		%cwp, %g1
+	rdpr		%canrestore, %g2
+	sub		%g1, 1, %g3
+
+	/* We have the skip count in %g7, if it hits zero then
+	 * %fp/%i7 are the registers we need.  Otherwise if our
+	 * %canrestore count maintained in %g2 hits zero we have
+	 * to start traversing the stack.
+	 */
+10:	brz,pn		%g2, 4f
+	 sub		%g2, 1, %g2
+	wrpr		%g3, %cwp
+	subcc		%g7, 1, %g7
+	bne,pt		%xcc, 10b
+	 sub		%g3, 1, %g3
+
+	/* We found the values we need in the cpu's register
+	 * windows.
+	 */
+	mov		%fp, %g3
+	ba,pt		%xcc, 3f
+	 mov		%i7, %g2
+
+50:	mov		%fp, %g3
+	ba,pt		%xcc, 2f
+	 mov		%i7, %g2
+
+	/* We hit the end of the valid register windows in the
+	 * cpu, start traversing the stack frame.
+	 */
+4:	mov		%fp, %g3
+
+20:	ldx		[%g3 + STACK_BIAS + RW_V9_I7], %g2
+	subcc		%g7, 1, %g7
+	bne,pn		%xcc, 20b
+	 ldx		[%g3 + STACK_BIAS + RW_V9_I6], %g3
+
+	/* Restore the current register window position and
+	 * re-enable interrupts.
+	 */
+3:	wrpr		%g1, %cwp
+	wrpr		%o5, %pstate
+
+2:	stx		%g3, [%o0 + PT_V9_FP]
+	sllx		%o5, 8, %o5
+	stx		%o5, [%o0 + PT_V9_TSTATE]
+	stx		%g2, [%o0 + PT_V9_TPC]
+	add		%g2, 4, %g2
+	retl
+	 stx		%g2, [%o0 + PT_V9_TNPC]
+	.size		perf_arch_fetch_caller_regs,.-perf_arch_fetch_caller_regs
+#endif /* CONFIG_PERF_EVENTS */
+
 #ifdef CONFIG_SMP
 	.globl		hard_smp_processor_id
 	.type		hard_smp_processor_id,#function
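For readers who do not read sparc64 assembly, here is a rough C-level paraphrase of what the routine records; it is a sketch only, not the kernel's code, and the function name is hypothetical. The live-register-window half of the walk (rotating %cwp through the %canrestore windows with interrupts off, labels 10:/4: above) has no C equivalent and is left out, so only the on-stack walk (label 20:) and the final stores are shown. The %pstate, %fp and %i7 values that the asm reads straight from the CPU are passed in as parameters, and struct reg_window plus STACK_BIAS stand in for the RW_V9_I6/RW_V9_I7/STACK_BIAS offsets the asm uses.

	#include <asm/ptrace.h>

	/* Rough paraphrase of the assembly above -- a sketch, not real code.
	 * pstate/fp/ret_pc stand in for what the asm reads directly from the
	 * CPU; skip == 0 corresponds to the asm's fast path at label 50.
	 */
	static void fetch_caller_regs_sketch(struct pt_regs *regs,
					     unsigned long pstate,
					     unsigned long fp,
					     unsigned long ret_pc,
					     int skip)
	{
		/* On-stack half of the walk (label 20): each saved register
		 * window holds the caller's %i6/%i7 at STACK_BIAS past the
		 * biased frame pointer.
		 */
		while (skip-- > 0) {
			struct reg_window *rw =
				(struct reg_window *)(fp + STACK_BIAS);

			ret_pc = rw->ins[7];	/* saved %i7: return address */
			fp     = rw->ins[6];	/* saved %i6: frame pointer  */
		}

		regs->u_regs[UREG_FP] = fp;	/* for perf_callchain()             */
		regs->tstate = pstate << 8;	/* fake %tstate (sllx %o5, 8 above) */
		regs->tpc    = ret_pc;		/* for perf_instruction_pointer()   */
		regs->tnpc   = ret_pc + 4;	/* "just to be tidy"                */
	}

The fake %tstate is just the current %pstate shifted into TSTATE's PSTATE field, which is all that user_mode() and perf_misc_flags() need to tell kernel and user samples apart.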