Commit 3fa17c395bb0c358745fbe0c8aa039d6cdac1735
Committed by
Chris Metcalf
1 parent
a61fd5e366
Exists in
master
and in
20 other branches
tile: support kprobes on tilegx
This change includes support for Kprobes, Jprobes and Return Probes. Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com> Signed-off-by: Tony Lu <zlu@tilera.com> Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Showing 14 changed files with 718 additions and 5 deletions Side-by-side Diff
- arch/tile/Kconfig
- arch/tile/include/asm/Kbuild
- arch/tile/include/asm/kdebug.h
- arch/tile/include/asm/kprobes.h
- arch/tile/include/asm/ptrace.h
- arch/tile/include/uapi/arch/opcode_tilegx.h
- arch/tile/include/uapi/arch/opcode_tilepro.h
- arch/tile/kernel/Makefile
- arch/tile/kernel/kprobes.c
- arch/tile/kernel/smp.c
- arch/tile/kernel/traps.c
- arch/tile/kernel/vmlinux.lds.S
- arch/tile/mm/fault.c
- samples/kprobes/kprobe_example.c
arch/tile/Kconfig
arch/tile/include/asm/Kbuild
arch/tile/include/asm/kdebug.h
1 | +/* | |
2 | + * Copyright 2012 Tilera Corporation. All Rights Reserved. | |
3 | + * | |
4 | + * This program is free software; you can redistribute it and/or | |
5 | + * modify it under the terms of the GNU General Public License | |
6 | + * as published by the Free Software Foundation, version 2. | |
7 | + * | |
8 | + * This program is distributed in the hope that it will be useful, but | |
9 | + * WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | |
11 | + * NON INFRINGEMENT. See the GNU General Public License for | |
12 | + * more details. | |
13 | + */ | |
14 | + | |
15 | +#ifndef _ASM_TILE_KDEBUG_H | |
16 | +#define _ASM_TILE_KDEBUG_H | |
17 | + | |
18 | +#include <linux/notifier.h> | |
19 | + | |
20 | +enum die_val { | |
21 | + DIE_OOPS = 1, | |
22 | + DIE_BREAK, | |
23 | + DIE_SSTEPBP, | |
24 | + DIE_PAGE_FAULT, | |
25 | + DIE_COMPILED_BPT | |
26 | +}; | |
27 | + | |
28 | +#endif /* _ASM_TILE_KDEBUG_H */ |
arch/tile/include/asm/kprobes.h
1 | +/* | |
2 | + * arch/tile/include/asm/kprobes.h | |
3 | + * | |
4 | + * Copyright 2012 Tilera Corporation. All Rights Reserved. | |
5 | + * | |
6 | + * This program is free software; you can redistribute it and/or | |
7 | + * modify it under the terms of the GNU General Public License | |
8 | + * as published by the Free Software Foundation, version 2. | |
9 | + * | |
10 | + * This program is distributed in the hope that it will be useful, but | |
11 | + * WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | |
13 | + * NON INFRINGEMENT. See the GNU General Public License for | |
14 | + * more details. | |
15 | + */ | |
16 | + | |
17 | +#ifndef _ASM_TILE_KPROBES_H | |
18 | +#define _ASM_TILE_KPROBES_H | |
19 | + | |
20 | +#include <linux/types.h> | |
21 | +#include <linux/ptrace.h> | |
22 | +#include <linux/percpu.h> | |
23 | + | |
24 | +#include <arch/opcode.h> | |
25 | + | |
26 | +#define __ARCH_WANT_KPROBES_INSN_SLOT | |
27 | +#define MAX_INSN_SIZE 2 | |
28 | + | |
29 | +#define kretprobe_blacklist_size 0 | |
30 | + | |
31 | +typedef tile_bundle_bits kprobe_opcode_t; | |
32 | + | |
33 | +#define flush_insn_slot(p) \ | |
34 | + flush_icache_range((unsigned long)p->addr, \ | |
35 | + (unsigned long)p->addr + \ | |
36 | + (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))) | |
37 | + | |
38 | +struct kprobe; | |
39 | + | |
40 | +/* Architecture specific copy of original instruction. */ | |
41 | +struct arch_specific_insn { | |
42 | + kprobe_opcode_t *insn; | |
43 | +}; | |
44 | + | |
45 | +struct prev_kprobe { | |
46 | + struct kprobe *kp; | |
47 | + unsigned long status; | |
48 | + unsigned long saved_pc; | |
49 | +}; | |
50 | + | |
51 | +#define MAX_JPROBES_STACK_SIZE 128 | |
52 | +#define MAX_JPROBES_STACK_ADDR \ | |
53 | + (((unsigned long)current_thread_info()) + THREAD_SIZE - 32 \ | |
54 | + - sizeof(struct pt_regs)) | |
55 | + | |
56 | +#define MIN_JPROBES_STACK_SIZE(ADDR) \ | |
57 | + ((((ADDR) + MAX_JPROBES_STACK_SIZE) > MAX_JPROBES_STACK_ADDR) \ | |
58 | + ? MAX_JPROBES_STACK_ADDR - (ADDR) \ | |
59 | + : MAX_JPROBES_STACK_SIZE) | |
60 | + | |
61 | +/* per-cpu kprobe control block. */ | |
62 | +struct kprobe_ctlblk { | |
63 | + unsigned long kprobe_status; | |
64 | + unsigned long kprobe_saved_pc; | |
65 | + unsigned long jprobe_saved_sp; | |
66 | + struct prev_kprobe prev_kprobe; | |
67 | + struct pt_regs jprobe_saved_regs; | |
68 | + char jprobes_stack[MAX_JPROBES_STACK_SIZE]; | |
69 | +}; | |
70 | + | |
71 | +extern tile_bundle_bits breakpoint2_insn; | |
72 | +extern tile_bundle_bits breakpoint_insn; | |
73 | + | |
74 | +void arch_remove_kprobe(struct kprobe *); | |
75 | + | |
76 | +extern int kprobe_exceptions_notify(struct notifier_block *self, | |
77 | + unsigned long val, void *data); | |
78 | + | |
79 | +#endif /* _ASM_TILE_KPROBES_H */ |
arch/tile/include/asm/ptrace.h
arch/tile/include/uapi/arch/opcode_tilegx.h
... | ... | @@ -61,6 +61,7 @@ |
61 | 61 | #define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES |
62 | 62 | #define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \ |
63 | 63 | TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES |
64 | +#define TILE_BPT_BUNDLE TILEGX_BPT_BUNDLE | |
64 | 65 | |
65 | 66 | /* 64-bit pattern for a { bpt ; nop } bundle. */ |
66 | 67 | #define TILEGX_BPT_BUNDLE 0x286a44ae51485000ULL |
arch/tile/include/uapi/arch/opcode_tilepro.h
... | ... | @@ -71,6 +71,7 @@ |
71 | 71 | #define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEPRO_BUNDLE_ALIGNMENT_IN_BYTES |
72 | 72 | #define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \ |
73 | 73 | TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES |
74 | +#define TILE_BPT_BUNDLE TILEPRO_BPT_BUNDLE | |
74 | 75 | |
75 | 76 | /* 64-bit pattern for a { bpt ; nop } bundle. */ |
76 | 77 | #define TILEPRO_BPT_BUNDLE 0x400b3cae70166000ULL |
arch/tile/kernel/Makefile
arch/tile/kernel/kprobes.c
1 | +/* | |
2 | + * arch/tile/kernel/kprobes.c | |
3 | + * Kprobes on TILE-Gx | |
4 | + * | |
5 | + * Some portions copied from the MIPS version. | |
6 | + * | |
7 | + * Copyright (C) IBM Corporation, 2002, 2004 | |
8 | + * Copyright 2006 Sony Corp. | |
9 | + * Copyright 2010 Cavium Networks | |
10 | + * | |
11 | + * Copyright 2012 Tilera Corporation. All Rights Reserved. | |
12 | + * | |
13 | + * This program is free software; you can redistribute it and/or | |
14 | + * modify it under the terms of the GNU General Public License | |
15 | + * as published by the Free Software Foundation, version 2. | |
16 | + * | |
17 | + * This program is distributed in the hope that it will be useful, but | |
18 | + * WITHOUT ANY WARRANTY; without even the implied warranty of | |
19 | + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | |
20 | + * NON INFRINGEMENT. See the GNU General Public License for | |
21 | + * more details. | |
22 | + */ | |
23 | + | |
24 | +#include <linux/kprobes.h> | |
25 | +#include <linux/kdebug.h> | |
26 | +#include <linux/module.h> | |
27 | +#include <linux/slab.h> | |
28 | +#include <linux/uaccess.h> | |
29 | +#include <asm/cacheflush.h> | |
30 | + | |
31 | +#include <arch/opcode.h> | |
32 | + | |
33 | +DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; | |
34 | +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); | |
35 | + | |
36 | +tile_bundle_bits breakpoint_insn = TILEGX_BPT_BUNDLE; | |
37 | +tile_bundle_bits breakpoint2_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP; | |
38 | + | |
39 | +/* | |
40 | + * Check whether instruction is branch or jump, or if executing it | |
41 | + * has different results depending on where it is executed (e.g. lnk). | |
42 | + */ | |
43 | +static int __kprobes insn_has_control(kprobe_opcode_t insn) | |
44 | +{ | |
45 | + if (get_Mode(insn) != 0) { /* Y-format bundle */ | |
46 | + if (get_Opcode_Y1(insn) != RRR_1_OPCODE_Y1 || | |
47 | + get_RRROpcodeExtension_Y1(insn) != UNARY_RRR_1_OPCODE_Y1) | |
48 | + return 0; | |
49 | + | |
50 | + switch (get_UnaryOpcodeExtension_Y1(insn)) { | |
51 | + case JALRP_UNARY_OPCODE_Y1: | |
52 | + case JALR_UNARY_OPCODE_Y1: | |
53 | + case JRP_UNARY_OPCODE_Y1: | |
54 | + case JR_UNARY_OPCODE_Y1: | |
55 | + case LNK_UNARY_OPCODE_Y1: | |
56 | + return 1; | |
57 | + default: | |
58 | + return 0; | |
59 | + } | |
60 | + } | |
61 | + | |
62 | + switch (get_Opcode_X1(insn)) { | |
63 | + case BRANCH_OPCODE_X1: /* branch instructions */ | |
64 | + case JUMP_OPCODE_X1: /* jump instructions: j and jal */ | |
65 | + return 1; | |
66 | + | |
67 | + case RRR_0_OPCODE_X1: /* other jump instructions */ | |
68 | + if (get_RRROpcodeExtension_X1(insn) != UNARY_RRR_0_OPCODE_X1) | |
69 | + return 0; | |
70 | + switch (get_UnaryOpcodeExtension_X1(insn)) { | |
71 | + case JALRP_UNARY_OPCODE_X1: | |
72 | + case JALR_UNARY_OPCODE_X1: | |
73 | + case JRP_UNARY_OPCODE_X1: | |
74 | + case JR_UNARY_OPCODE_X1: | |
75 | + case LNK_UNARY_OPCODE_X1: | |
76 | + return 1; | |
77 | + default: | |
78 | + return 0; | |
79 | + } | |
80 | + default: | |
81 | + return 0; | |
82 | + } | |
83 | +} | |
84 | + | |
85 | +int __kprobes arch_prepare_kprobe(struct kprobe *p) | |
86 | +{ | |
87 | + unsigned long addr = (unsigned long)p->addr; | |
88 | + | |
89 | + if (addr & (sizeof(kprobe_opcode_t) - 1)) | |
90 | + return -EINVAL; | |
91 | + | |
92 | + if (insn_has_control(*p->addr)) { | |
93 | + pr_notice("Kprobes for control instructions are not " | |
94 | + "supported\n"); | |
95 | + return -EINVAL; | |
96 | + } | |
97 | + | |
98 | + /* insn: must be on special executable page on tile. */ | |
99 | + p->ainsn.insn = get_insn_slot(); | |
100 | + if (!p->ainsn.insn) | |
101 | + return -ENOMEM; | |
102 | + | |
103 | + /* | |
104 | + * In the kprobe->ainsn.insn[] array we store the original | |
105 | + * instruction at index zero and a break trap instruction at | |
106 | + * index one. | |
107 | + */ | |
108 | + memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t)); | |
109 | + p->ainsn.insn[1] = breakpoint2_insn; | |
110 | + p->opcode = *p->addr; | |
111 | + | |
112 | + return 0; | |
113 | +} | |
114 | + | |
115 | +void __kprobes arch_arm_kprobe(struct kprobe *p) | |
116 | +{ | |
117 | + unsigned long addr_wr; | |
118 | + | |
119 | + /* Operate on writable kernel text mapping. */ | |
120 | + addr_wr = (unsigned long)p->addr - MEM_SV_START + PAGE_OFFSET; | |
121 | + | |
122 | + if (probe_kernel_write((void *)addr_wr, &breakpoint_insn, | |
123 | + sizeof(breakpoint_insn))) | |
124 | + pr_err("%s: failed to enable kprobe\n", __func__); | |
125 | + | |
126 | + smp_wmb(); | |
127 | + flush_insn_slot(p); | |
128 | +} | |
129 | + | |
130 | +void __kprobes arch_disarm_kprobe(struct kprobe *kp) | |
131 | +{ | |
132 | + unsigned long addr_wr; | |
133 | + | |
134 | + /* Operate on writable kernel text mapping. */ | |
135 | + addr_wr = (unsigned long)kp->addr - MEM_SV_START + PAGE_OFFSET; | |
136 | + | |
137 | + if (probe_kernel_write((void *)addr_wr, &kp->opcode, | |
138 | + sizeof(kp->opcode))) | |
139 | + pr_err("%s: failed to enable kprobe\n", __func__); | |
140 | + | |
141 | + smp_wmb(); | |
142 | + flush_insn_slot(kp); | |
143 | +} | |
144 | + | |
145 | +void __kprobes arch_remove_kprobe(struct kprobe *p) | |
146 | +{ | |
147 | + if (p->ainsn.insn) { | |
148 | + free_insn_slot(p->ainsn.insn, 0); | |
149 | + p->ainsn.insn = NULL; | |
150 | + } | |
151 | +} | |
152 | + | |
153 | +static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) | |
154 | +{ | |
155 | + kcb->prev_kprobe.kp = kprobe_running(); | |
156 | + kcb->prev_kprobe.status = kcb->kprobe_status; | |
157 | + kcb->prev_kprobe.saved_pc = kcb->kprobe_saved_pc; | |
158 | +} | |
159 | + | |
160 | +static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) | |
161 | +{ | |
162 | + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); | |
163 | + kcb->kprobe_status = kcb->prev_kprobe.status; | |
164 | + kcb->kprobe_saved_pc = kcb->prev_kprobe.saved_pc; | |
165 | +} | |
166 | + | |
167 | +static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, | |
168 | + struct kprobe_ctlblk *kcb) | |
169 | +{ | |
170 | + __this_cpu_write(current_kprobe, p); | |
171 | + kcb->kprobe_saved_pc = regs->pc; | |
172 | +} | |
173 | + | |
174 | +static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) | |
175 | +{ | |
176 | + /* Single step inline if the instruction is a break. */ | |
177 | + if (p->opcode == breakpoint_insn || | |
178 | + p->opcode == breakpoint2_insn) | |
179 | + regs->pc = (unsigned long)p->addr; | |
180 | + else | |
181 | + regs->pc = (unsigned long)&p->ainsn.insn[0]; | |
182 | +} | |
183 | + | |
184 | +static int __kprobes kprobe_handler(struct pt_regs *regs) | |
185 | +{ | |
186 | + struct kprobe *p; | |
187 | + int ret = 0; | |
188 | + kprobe_opcode_t *addr; | |
189 | + struct kprobe_ctlblk *kcb; | |
190 | + | |
191 | + addr = (kprobe_opcode_t *)regs->pc; | |
192 | + | |
193 | + /* | |
194 | + * We don't want to be preempted for the entire | |
195 | + * duration of kprobe processing. | |
196 | + */ | |
197 | + preempt_disable(); | |
198 | + kcb = get_kprobe_ctlblk(); | |
199 | + | |
200 | + /* Check we're not actually recursing. */ | |
201 | + if (kprobe_running()) { | |
202 | + p = get_kprobe(addr); | |
203 | + if (p) { | |
204 | + if (kcb->kprobe_status == KPROBE_HIT_SS && | |
205 | + p->ainsn.insn[0] == breakpoint_insn) { | |
206 | + goto no_kprobe; | |
207 | + } | |
208 | + /* | |
209 | + * We have reentered the kprobe_handler(), since | |
210 | + * another probe was hit while within the handler. | |
211 | + * We here save the original kprobes variables and | |
212 | + * just single step on the instruction of the new probe | |
213 | + * without calling any user handlers. | |
214 | + */ | |
215 | + save_previous_kprobe(kcb); | |
216 | + set_current_kprobe(p, regs, kcb); | |
217 | + kprobes_inc_nmissed_count(p); | |
218 | + prepare_singlestep(p, regs); | |
219 | + kcb->kprobe_status = KPROBE_REENTER; | |
220 | + return 1; | |
221 | + } else { | |
222 | + if (*addr != breakpoint_insn) { | |
223 | + /* | |
224 | + * The breakpoint instruction was removed by | |
225 | + * another cpu right after we hit, no further | |
226 | + * handling of this interrupt is appropriate. | |
227 | + */ | |
228 | + ret = 1; | |
229 | + goto no_kprobe; | |
230 | + } | |
231 | + p = __this_cpu_read(current_kprobe); | |
232 | + if (p->break_handler && p->break_handler(p, regs)) | |
233 | + goto ss_probe; | |
234 | + } | |
235 | + goto no_kprobe; | |
236 | + } | |
237 | + | |
238 | + p = get_kprobe(addr); | |
239 | + if (!p) { | |
240 | + if (*addr != breakpoint_insn) { | |
241 | + /* | |
242 | + * The breakpoint instruction was removed right | |
243 | + * after we hit it. Another cpu has removed | |
244 | + * either a probepoint or a debugger breakpoint | |
245 | + * at this address. In either case, no further | |
246 | + * handling of this interrupt is appropriate. | |
247 | + */ | |
248 | + ret = 1; | |
249 | + } | |
250 | + /* Not one of ours: let kernel handle it. */ | |
251 | + goto no_kprobe; | |
252 | + } | |
253 | + | |
254 | + set_current_kprobe(p, regs, kcb); | |
255 | + kcb->kprobe_status = KPROBE_HIT_ACTIVE; | |
256 | + | |
257 | + if (p->pre_handler && p->pre_handler(p, regs)) { | |
258 | + /* Handler has already set things up, so skip ss setup. */ | |
259 | + return 1; | |
260 | + } | |
261 | + | |
262 | +ss_probe: | |
263 | + prepare_singlestep(p, regs); | |
264 | + kcb->kprobe_status = KPROBE_HIT_SS; | |
265 | + return 1; | |
266 | + | |
267 | +no_kprobe: | |
268 | + preempt_enable_no_resched(); | |
269 | + return ret; | |
270 | +} | |
271 | + | |
272 | +/* | |
273 | + * Called after single-stepping. p->addr is the address of the | |
274 | + * instruction that has been replaced by the breakpoint. To avoid the | |
275 | + * SMP problems that can occur when we temporarily put back the | |
276 | + * original opcode to single-step, we single-stepped a copy of the | |
277 | + * instruction. The address of this copy is p->ainsn.insn. | |
278 | + * | |
279 | + * This function prepares to return from the post-single-step | |
280 | + * breakpoint trap. | |
281 | + */ | |
282 | +static void __kprobes resume_execution(struct kprobe *p, | |
283 | + struct pt_regs *regs, | |
284 | + struct kprobe_ctlblk *kcb) | |
285 | +{ | |
286 | + unsigned long orig_pc = kcb->kprobe_saved_pc; | |
287 | + regs->pc = orig_pc + 8; | |
288 | +} | |
289 | + | |
290 | +static inline int post_kprobe_handler(struct pt_regs *regs) | |
291 | +{ | |
292 | + struct kprobe *cur = kprobe_running(); | |
293 | + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | |
294 | + | |
295 | + if (!cur) | |
296 | + return 0; | |
297 | + | |
298 | + if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { | |
299 | + kcb->kprobe_status = KPROBE_HIT_SSDONE; | |
300 | + cur->post_handler(cur, regs, 0); | |
301 | + } | |
302 | + | |
303 | + resume_execution(cur, regs, kcb); | |
304 | + | |
305 | + /* Restore back the original saved kprobes variables and continue. */ | |
306 | + if (kcb->kprobe_status == KPROBE_REENTER) { | |
307 | + restore_previous_kprobe(kcb); | |
308 | + goto out; | |
309 | + } | |
310 | + reset_current_kprobe(); | |
311 | +out: | |
312 | + preempt_enable_no_resched(); | |
313 | + | |
314 | + return 1; | |
315 | +} | |
316 | + | |
317 | +static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) | |
318 | +{ | |
319 | + struct kprobe *cur = kprobe_running(); | |
320 | + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | |
321 | + | |
322 | + if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) | |
323 | + return 1; | |
324 | + | |
325 | + if (kcb->kprobe_status & KPROBE_HIT_SS) { | |
326 | + /* | |
327 | + * We are here because the instruction being single | |
328 | + * stepped caused a page fault. We reset the current | |
329 | + * kprobe and the ip points back to the probe address | |
330 | + * and allow the page fault handler to continue as a | |
331 | + * normal page fault. | |
332 | + */ | |
333 | + resume_execution(cur, regs, kcb); | |
334 | + reset_current_kprobe(); | |
335 | + preempt_enable_no_resched(); | |
336 | + } | |
337 | + return 0; | |
338 | +} | |
339 | + | |
340 | +/* | |
341 | + * Wrapper routine for handling exceptions. | |
342 | + */ | |
343 | +int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | |
344 | + unsigned long val, void *data) | |
345 | +{ | |
346 | + struct die_args *args = (struct die_args *)data; | |
347 | + int ret = NOTIFY_DONE; | |
348 | + | |
349 | + switch (val) { | |
350 | + case DIE_BREAK: | |
351 | + if (kprobe_handler(args->regs)) | |
352 | + ret = NOTIFY_STOP; | |
353 | + break; | |
354 | + case DIE_SSTEPBP: | |
355 | + if (post_kprobe_handler(args->regs)) | |
356 | + ret = NOTIFY_STOP; | |
357 | + break; | |
358 | + case DIE_PAGE_FAULT: | |
359 | + /* kprobe_running() needs smp_processor_id(). */ | |
360 | + preempt_disable(); | |
361 | + | |
362 | + if (kprobe_running() | |
363 | + && kprobe_fault_handler(args->regs, args->trapnr)) | |
364 | + ret = NOTIFY_STOP; | |
365 | + preempt_enable(); | |
366 | + break; | |
367 | + default: | |
368 | + break; | |
369 | + } | |
370 | + return ret; | |
371 | +} | |
372 | + | |
373 | +int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | |
374 | +{ | |
375 | + struct jprobe *jp = container_of(p, struct jprobe, kp); | |
376 | + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | |
377 | + | |
378 | + kcb->jprobe_saved_regs = *regs; | |
379 | + kcb->jprobe_saved_sp = regs->sp; | |
380 | + | |
381 | + memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp, | |
382 | + MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp)); | |
383 | + | |
384 | + regs->pc = (unsigned long)(jp->entry); | |
385 | + | |
386 | + return 1; | |
387 | +} | |
388 | + | |
389 | +/* Defined in the inline asm below. */ | |
390 | +void jprobe_return_end(void); | |
391 | + | |
392 | +void __kprobes jprobe_return(void) | |
393 | +{ | |
394 | + asm volatile( | |
395 | + "bpt\n\t" | |
396 | + ".globl jprobe_return_end\n" | |
397 | + "jprobe_return_end:\n"); | |
398 | +} | |
399 | + | |
400 | +int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | |
401 | +{ | |
402 | + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | |
403 | + | |
404 | + if (regs->pc >= (unsigned long)jprobe_return && | |
405 | + regs->pc <= (unsigned long)jprobe_return_end) { | |
406 | + *regs = kcb->jprobe_saved_regs; | |
407 | + memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack, | |
408 | + MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp)); | |
409 | + preempt_enable_no_resched(); | |
410 | + | |
411 | + return 1; | |
412 | + } | |
413 | + return 0; | |
414 | +} | |
415 | + | |
416 | +/* | |
417 | + * Function return probe trampoline: | |
418 | + * - init_kprobes() establishes a probepoint here | |
419 | + * - When the probed function returns, this probe causes the | |
420 | + * handlers to fire | |
421 | + */ | |
422 | +static void __used kretprobe_trampoline_holder(void) | |
423 | +{ | |
424 | + asm volatile( | |
425 | + "nop\n\t" | |
426 | + ".global kretprobe_trampoline\n" | |
427 | + "kretprobe_trampoline:\n\t" | |
428 | + "nop\n\t" | |
429 | + : : : "memory"); | |
430 | +} | |
431 | + | |
432 | +void kretprobe_trampoline(void); | |
433 | + | |
434 | +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, | |
435 | + struct pt_regs *regs) | |
436 | +{ | |
437 | + ri->ret_addr = (kprobe_opcode_t *) regs->lr; | |
438 | + | |
439 | + /* Replace the return addr with trampoline addr */ | |
440 | + regs->lr = (unsigned long)kretprobe_trampoline; | |
441 | +} | |
442 | + | |
443 | +/* | |
444 | + * Called when the probe at kretprobe trampoline is hit. | |
445 | + */ | |
446 | +static int __kprobes trampoline_probe_handler(struct kprobe *p, | |
447 | + struct pt_regs *regs) | |
448 | +{ | |
449 | + struct kretprobe_instance *ri = NULL; | |
450 | + struct hlist_head *head, empty_rp; | |
451 | + struct hlist_node *tmp; | |
452 | + unsigned long flags, orig_ret_address = 0; | |
453 | + unsigned long trampoline_address = (unsigned long)kretprobe_trampoline; | |
454 | + | |
455 | + INIT_HLIST_HEAD(&empty_rp); | |
456 | + kretprobe_hash_lock(current, &head, &flags); | |
457 | + | |
458 | + /* | |
459 | + * It is possible to have multiple instances associated with a given | |
460 | + * task either because multiple functions in the call path have | |
461 | + * a return probe installed on them, and/or more than one | |
462 | + * return probe was registered for a target function. | |
463 | + * | |
464 | + * We can handle this because: | |
465 | + * - instances are always inserted at the head of the list | |
466 | + * - when multiple return probes are registered for the same | |
467 | + * function, the first instance's ret_addr will point to the | |
468 | + * real return address, and all the rest will point to | |
469 | + * kretprobe_trampoline | |
470 | + */ | |
471 | + hlist_for_each_entry_safe(ri, tmp, head, hlist) { | |
472 | + if (ri->task != current) | |
473 | + /* another task is sharing our hash bucket */ | |
474 | + continue; | |
475 | + | |
476 | + if (ri->rp && ri->rp->handler) | |
477 | + ri->rp->handler(ri, regs); | |
478 | + | |
479 | + orig_ret_address = (unsigned long)ri->ret_addr; | |
480 | + recycle_rp_inst(ri, &empty_rp); | |
481 | + | |
482 | + if (orig_ret_address != trampoline_address) { | |
483 | + /* | |
484 | + * This is the real return address. Any other | |
485 | + * instances associated with this task are for | |
486 | + * other calls deeper on the call stack | |
487 | + */ | |
488 | + break; | |
489 | + } | |
490 | + } | |
491 | + | |
492 | + kretprobe_assert(ri, orig_ret_address, trampoline_address); | |
493 | + instruction_pointer(regs) = orig_ret_address; | |
494 | + | |
495 | + reset_current_kprobe(); | |
496 | + kretprobe_hash_unlock(current, &flags); | |
497 | + preempt_enable_no_resched(); | |
498 | + | |
499 | + hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { | |
500 | + hlist_del(&ri->hlist); | |
501 | + kfree(ri); | |
502 | + } | |
503 | + /* | |
504 | + * By returning a non-zero value, we are telling | |
505 | + * kprobe_handler() that we don't want the post_handler | |
506 | + * to run (and have re-enabled preemption) | |
507 | + */ | |
508 | + return 1; | |
509 | +} | |
510 | + | |
511 | +int __kprobes arch_trampoline_kprobe(struct kprobe *p) | |
512 | +{ | |
513 | + if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline) | |
514 | + return 1; | |
515 | + | |
516 | + return 0; | |
517 | +} | |
518 | + | |
519 | +static struct kprobe trampoline_p = { | |
520 | + .addr = (kprobe_opcode_t *)kretprobe_trampoline, | |
521 | + .pre_handler = trampoline_probe_handler | |
522 | +}; | |
523 | + | |
524 | +int __init arch_init_kprobes(void) | |
525 | +{ | |
526 | + register_kprobe(&trampoline_p); | |
527 | + return 0; | |
528 | +} |
arch/tile/kernel/smp.c
... | ... | @@ -20,6 +20,7 @@ |
20 | 20 | #include <linux/irq.h> |
21 | 21 | #include <linux/module.h> |
22 | 22 | #include <asm/cacheflush.h> |
23 | +#include <asm/homecache.h> | |
23 | 24 | |
24 | 25 | HV_Topology smp_topology __write_once; |
25 | 26 | EXPORT_SYMBOL(smp_topology); |
... | ... | @@ -167,9 +168,16 @@ |
167 | 168 | void flush_icache_range(unsigned long start, unsigned long end) |
168 | 169 | { |
169 | 170 | struct ipi_flush flush = { start, end }; |
170 | - preempt_disable(); | |
171 | - on_each_cpu(ipi_flush_icache_range, &flush, 1); | |
172 | - preempt_enable(); | |
171 | + | |
172 | + /* If invoked with irqs disabled, we can not issue IPIs. */ | |
173 | + if (irqs_disabled()) | |
174 | + flush_remote(0, HV_FLUSH_EVICT_L1I, NULL, 0, 0, 0, | |
175 | + NULL, NULL, 0); | |
176 | + else { | |
177 | + preempt_disable(); | |
178 | + on_each_cpu(ipi_flush_icache_range, &flush, 1); | |
179 | + preempt_enable(); | |
180 | + } | |
173 | 181 | } |
174 | 182 | |
175 | 183 |
arch/tile/kernel/traps.c
... | ... | @@ -15,6 +15,7 @@ |
15 | 15 | #include <linux/sched.h> |
16 | 16 | #include <linux/kernel.h> |
17 | 17 | #include <linux/kprobes.h> |
18 | +#include <linux/kdebug.h> | |
18 | 19 | #include <linux/module.h> |
19 | 20 | #include <linux/reboot.h> |
20 | 21 | #include <linux/uaccess.h> |
... | ... | @@ -214,6 +215,43 @@ |
214 | 215 | #endif |
215 | 216 | }; |
216 | 217 | |
218 | +static int do_bpt(struct pt_regs *regs) | |
219 | +{ | |
220 | + unsigned long bundle, bcode, bpt; | |
221 | + | |
222 | + bundle = *(unsigned long *)instruction_pointer(regs); | |
223 | + | |
224 | + /* | |
225 | + * bpt should be { bpt; nop }, which is 0x286a44ae51485000ULL. | |
226 | + * We encode the unused least significant bits for other purposes. | |
227 | + */ | |
228 | + bpt = bundle & ~((1ULL << 12) - 1); | |
229 | + if (bpt != TILE_BPT_BUNDLE) | |
230 | + return 0; | |
231 | + | |
232 | + bcode = bundle & ((1ULL << 12) - 1); | |
233 | + /* | |
234 | + * Notify the kprobe handlers if the instruction is likely to | |
235 | + * pertain to them. | |
236 | + */ | |
237 | + switch (bcode) { | |
238 | + /* breakpoint_insn */ | |
239 | + case 0: | |
240 | + notify_die(DIE_BREAK, "debug", regs, bundle, | |
241 | + INT_ILL, SIGTRAP); | |
242 | + break; | |
243 | + /* breakpoint2_insn */ | |
244 | + case DIE_SSTEPBP: | |
245 | + notify_die(DIE_SSTEPBP, "single_step", regs, bundle, | |
246 | + INT_ILL, SIGTRAP); | |
247 | + break; | |
248 | + default: | |
249 | + return 0; | |
250 | + } | |
251 | + | |
252 | + return 1; | |
253 | +} | |
254 | + | |
217 | 255 | void __kprobes do_trap(struct pt_regs *regs, int fault_num, |
218 | 256 | unsigned long reason) |
219 | 257 | { |
220 | 258 | |
... | ... | @@ -221,7 +259,12 @@ |
221 | 259 | int signo, code; |
222 | 260 | unsigned long address = 0; |
223 | 261 | bundle_bits instr; |
262 | + int is_kernel = !user_mode(regs); | |
224 | 263 | |
264 | + /* Handle breakpoints, etc. */ | |
265 | + if (is_kernel && fault_num == INT_ILL && do_bpt(regs)) | |
266 | + return; | |
267 | + | |
225 | 268 | /* Re-enable interrupts, if they were previously enabled. */ |
226 | 269 | if (!(regs->flags & PT_FLAGS_DISABLE_IRQ)) |
227 | 270 | local_irq_enable(); |
... | ... | @@ -230,7 +273,7 @@ |
230 | 273 | * If it hits in kernel mode and we can't fix it up, just exit the |
231 | 274 | * current process and hope for the best. |
232 | 275 | */ |
233 | - if (!user_mode(regs)) { | |
276 | + if (is_kernel) { | |
234 | 277 | const char *name; |
235 | 278 | char buf[100]; |
236 | 279 | if (fixup_exception(regs)) /* ILL_TRANS or UNALIGN_DATA */ |
arch/tile/kernel/vmlinux.lds.S
arch/tile/mm/fault.c
... | ... | @@ -34,6 +34,7 @@ |
34 | 34 | #include <linux/hugetlb.h> |
35 | 35 | #include <linux/syscalls.h> |
36 | 36 | #include <linux/uaccess.h> |
37 | +#include <linux/kdebug.h> | |
37 | 38 | |
38 | 39 | #include <asm/pgalloc.h> |
39 | 40 | #include <asm/sections.h> |
... | ... | @@ -720,6 +721,17 @@ |
720 | 721 | unsigned long address, unsigned long write) |
721 | 722 | { |
722 | 723 | int is_page_fault; |
724 | + | |
725 | +#ifdef CONFIG_KPROBES | |
726 | + /* | |
727 | + * This is to notify the fault handler of the kprobes. The | |
728 | + * exception code is redundant as it is also carried in REGS, | |
729 | + * but we pass it anyhow. | |
730 | + */ | |
731 | + if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1, | |
732 | + regs->faultnum, SIGSEGV) == NOTIFY_STOP) | |
733 | + return; | |
734 | +#endif | |
723 | 735 | |
724 | 736 | #ifdef __tilegx__ |
725 | 737 | /* |
samples/kprobes/kprobe_example.c
... | ... | @@ -37,6 +37,11 @@ |
37 | 37 | " status = 0x%lx\n", |
38 | 38 | p->addr, regs->cp0_epc, regs->cp0_status); |
39 | 39 | #endif |
40 | +#ifdef CONFIG_TILEGX | |
41 | + printk(KERN_INFO "pre_handler: p->addr = 0x%p, pc = 0x%lx," | |
42 | + " ex1 = 0x%lx\n", | |
43 | + p->addr, regs->pc, regs->ex1); | |
44 | +#endif | |
40 | 45 | |
41 | 46 | /* A dump_stack() here will give a stack backtrace */ |
42 | 47 | return 0; |
... | ... | @@ -57,6 +62,10 @@ |
57 | 62 | #ifdef CONFIG_MIPS |
58 | 63 | printk(KERN_INFO "post_handler: p->addr = 0x%p, status = 0x%lx\n", |
59 | 64 | p->addr, regs->cp0_status); |
65 | +#endif | |
66 | +#ifdef CONFIG_TILEGX | |
67 | + printk(KERN_INFO "post_handler: p->addr = 0x%p, ex1 = 0x%lx\n", | |
68 | + p->addr, regs->ex1); | |
60 | 69 | #endif |
61 | 70 | } |
62 | 71 |