Commit 57fcfdf9b2bbe8ea47771ffc16c418a20e4173c6
1 parent: da28c59799
Exists in master and in 7 other branches
sh: kprobes SMP support.
Presently kprobes support relies on several saved opcode variables for saving and restoring state, without any specific locking. This is inherently racy on SMP, and given that we already use per-CPU variables for everything else, convert these over too.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
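For context, the change follows the usual static-to-per-CPU conversion pattern. The sketch below is illustrative only: the names (saved_state, save_state, restore_state) are invented and do not appear in this file, and it assumes the same-era __get_cpu_var() accessor that the patch itself uses, which the kprobes paths can use directly because they already run with preemption disabled.

/*
 * Illustrative sketch only -- not code from this commit. It shows the
 * general shape of the conversion: one file-scope variable (racy on SMP)
 * becomes a per-CPU variable. Names are invented for the example.
 */
#include <linux/percpu.h>

/* Before: static unsigned long saved_state; -- one copy shared by every CPU, racy without locking. */

/* After: one private copy per CPU. */
static DEFINE_PER_CPU(unsigned long, saved_state_pcpu);

static void save_state(unsigned long val)
{
	/*
	 * __get_cpu_var() resolves to this CPU's instance. No locking is
	 * needed as long as the caller cannot migrate between CPUs, which
	 * holds here because the kprobes paths run with preemption disabled.
	 */
	__get_cpu_var(saved_state_pcpu) = val;
}

static unsigned long restore_state(void)
{
	return __get_cpu_var(saved_state_pcpu);
}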
Showing 1 changed file with 50 additions and 50 deletions
arch/sh/kernel/kprobes.c
... | ... | @@ -20,9 +20,9 @@ |
20 | 20 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; |
21 | 21 | DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); |
22 | 22 | |
23 | -static struct kprobe saved_current_opcode; | |
24 | -static struct kprobe saved_next_opcode; | |
25 | -static struct kprobe saved_next_opcode2; | |
23 | +static DEFINE_PER_CPU(struct kprobe, saved_current_opcode); | |
24 | +static DEFINE_PER_CPU(struct kprobe, saved_next_opcode); | |
25 | +static DEFINE_PER_CPU(struct kprobe, saved_next_opcode2); | |
26 | 26 | |
27 | 27 | #define OPCODE_JMP(x) (((x) & 0xF0FF) == 0x402b) |
28 | 28 | #define OPCODE_JSR(x) (((x) & 0xF0FF) == 0x400b) |
... | ... | @@ -102,16 +102,21 @@ |
102 | 102 | |
103 | 103 | void __kprobes arch_remove_kprobe(struct kprobe *p) |
104 | 104 | { |
105 | - if (saved_next_opcode.addr != 0x0) { | |
105 | + struct kprobe *saved = &__get_cpu_var(saved_next_opcode); | |
106 | + | |
107 | + if (saved->addr) { | |
106 | 108 | arch_disarm_kprobe(p); |
107 | - arch_disarm_kprobe(&saved_next_opcode); | |
108 | - saved_next_opcode.addr = 0x0; | |
109 | - saved_next_opcode.opcode = 0x0; | |
109 | + arch_disarm_kprobe(saved); | |
110 | 110 | |
111 | - if (saved_next_opcode2.addr != 0x0) { | |
112 | - arch_disarm_kprobe(&saved_next_opcode2); | |
113 | - saved_next_opcode2.addr = 0x0; | |
114 | - saved_next_opcode2.opcode = 0x0; | |
111 | + saved->addr = NULL; | |
112 | + saved->opcode = 0; | |
113 | + | |
114 | + saved = &__get_cpu_var(saved_next_opcode2); | |
115 | + if (saved->addr) { | |
116 | + arch_disarm_kprobe(saved); | |
117 | + | |
118 | + saved->addr = NULL; | |
119 | + saved->opcode = 0; | |
115 | 120 | } |
116 | 121 | } |
117 | 122 | } |
... | ... | @@ -141,57 +146,59 @@ |
141 | 146 | */ |
142 | 147 | static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) |
143 | 148 | { |
144 | - kprobe_opcode_t *addr = NULL; | |
145 | - saved_current_opcode.addr = (kprobe_opcode_t *) (regs->pc); | |
146 | - addr = saved_current_opcode.addr; | |
149 | + __get_cpu_var(saved_current_opcode).addr = (kprobe_opcode_t *)regs->pc; | |
147 | 150 | |
148 | 151 | if (p != NULL) { |
152 | + struct kprobe *op1, *op2; | |
153 | + | |
149 | 154 | arch_disarm_kprobe(p); |
150 | 155 | |
156 | + op1 = &__get_cpu_var(saved_next_opcode); | |
157 | + op2 = &__get_cpu_var(saved_next_opcode2); | |
158 | + | |
151 | 159 | if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) { |
152 | 160 | unsigned int reg_nr = ((p->opcode >> 8) & 0x000F); |
153 | - saved_next_opcode.addr = | |
154 | - (kprobe_opcode_t *) regs->regs[reg_nr]; | |
161 | + op1->addr = (kprobe_opcode_t *) regs->regs[reg_nr]; | |
155 | 162 | } else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) { |
156 | 163 | unsigned long disp = (p->opcode & 0x0FFF); |
157 | - saved_next_opcode.addr = | |
164 | + op1->addr = | |
158 | 165 | (kprobe_opcode_t *) (regs->pc + 4 + disp * 2); |
159 | 166 | |
160 | 167 | } else if (OPCODE_BRAF(p->opcode) || OPCODE_BSRF(p->opcode)) { |
161 | 168 | unsigned int reg_nr = ((p->opcode >> 8) & 0x000F); |
162 | - saved_next_opcode.addr = | |
169 | + op1->addr = | |
163 | 170 | (kprobe_opcode_t *) (regs->pc + 4 + |
164 | 171 | regs->regs[reg_nr]); |
165 | 172 | |
166 | 173 | } else if (OPCODE_RTS(p->opcode)) { |
167 | - saved_next_opcode.addr = (kprobe_opcode_t *) regs->pr; | |
174 | + op1->addr = (kprobe_opcode_t *) regs->pr; | |
168 | 175 | |
169 | 176 | } else if (OPCODE_BF(p->opcode) || OPCODE_BT(p->opcode)) { |
170 | 177 | unsigned long disp = (p->opcode & 0x00FF); |
171 | 178 | /* case 1 */ |
172 | - saved_next_opcode.addr = p->addr + 1; | |
179 | + op1->addr = p->addr + 1; | |
173 | 180 | /* case 2 */ |
174 | - saved_next_opcode2.addr = | |
181 | + op2->addr = | |
175 | 182 | (kprobe_opcode_t *) (regs->pc + 4 + disp * 2); |
176 | - saved_next_opcode2.opcode = *(saved_next_opcode2.addr); | |
177 | - arch_arm_kprobe(&saved_next_opcode2); | |
183 | + op2->opcode = *(op2->addr); | |
184 | + arch_arm_kprobe(op2); | |
178 | 185 | |
179 | 186 | } else if (OPCODE_BF_S(p->opcode) || OPCODE_BT_S(p->opcode)) { |
180 | 187 | unsigned long disp = (p->opcode & 0x00FF); |
181 | 188 | /* case 1 */ |
182 | - saved_next_opcode.addr = p->addr + 2; | |
189 | + op1->addr = p->addr + 2; | |
183 | 190 | /* case 2 */ |
184 | - saved_next_opcode2.addr = | |
191 | + op2->addr = | |
185 | 192 | (kprobe_opcode_t *) (regs->pc + 4 + disp * 2); |
186 | - saved_next_opcode2.opcode = *(saved_next_opcode2.addr); | |
187 | - arch_arm_kprobe(&saved_next_opcode2); | |
193 | + op2->opcode = *(op2->addr); | |
194 | + arch_arm_kprobe(op2); | |
188 | 195 | |
189 | 196 | } else { |
190 | - saved_next_opcode.addr = p->addr + 1; | |
197 | + op1->addr = p->addr + 1; | |
191 | 198 | } |
192 | 199 | |
193 | - saved_next_opcode.opcode = *(saved_next_opcode.addr); | |
194 | - arch_arm_kprobe(&saved_next_opcode); | |
200 | + op1->opcode = *(op1->addr); | |
201 | + arch_arm_kprobe(op1); | |
195 | 202 | } |
196 | 203 | } |
197 | 204 | |
... | ... | @@ -376,21 +383,23 @@ |
376 | 383 | cur->post_handler(cur, regs, 0); |
377 | 384 | } |
378 | 385 | |
379 | - if (saved_next_opcode.addr != 0x0) { | |
380 | - arch_disarm_kprobe(&saved_next_opcode); | |
381 | - saved_next_opcode.addr = 0x0; | |
382 | - saved_next_opcode.opcode = 0x0; | |
386 | + p = &__get_cpu_var(saved_next_opcode); | |
387 | + if (p->addr) { | |
388 | + arch_disarm_kprobe(p); | |
389 | + p->addr = NULL; | |
390 | + p->opcode = 0; | |
383 | 391 | |
384 | - addr = saved_current_opcode.addr; | |
385 | - saved_current_opcode.addr = 0x0; | |
392 | + addr = __get_cpu_var(saved_current_opcode).addr; | |
393 | + __get_cpu_var(saved_current_opcode).addr = NULL; | |
386 | 394 | |
387 | 395 | p = get_kprobe(addr); |
388 | 396 | arch_arm_kprobe(p); |
389 | 397 | |
390 | - if (saved_next_opcode2.addr != 0x0) { | |
391 | - arch_disarm_kprobe(&saved_next_opcode2); | |
392 | - saved_next_opcode2.addr = 0x0; | |
393 | - saved_next_opcode2.opcode = 0x0; | |
398 | + p = &__get_cpu_var(saved_next_opcode2); | |
399 | + if (p->addr) { | |
400 | + arch_disarm_kprobe(p); | |
401 | + p->addr = NULL; | |
402 | + p->opcode = 0; | |
394 | 403 | } |
395 | 404 | } |
396 | 405 | |
... | ... | @@ -572,15 +581,6 @@ |
572 | 581 | |
573 | 582 | int __init arch_init_kprobes(void) |
574 | 583 | { |
575 | - saved_next_opcode.addr = 0x0; | |
576 | - saved_next_opcode.opcode = 0x0; | |
577 | - | |
578 | - saved_current_opcode.addr = 0x0; | |
579 | - saved_current_opcode.opcode = 0x0; | |
580 | - | |
581 | - saved_next_opcode2.addr = 0x0; | |
582 | - saved_next_opcode2.opcode = 0x0; | |
583 | - | |
584 | 584 | return register_kprobe(&trampoline_p); |
585 | 585 | } |