Blame view
kernel/kprobes.c
62.4 KB
1da177e4c Linux-2.6.12-rc2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 |
/* * Kernel Probes (KProbes) * kernel/kprobes.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) IBM Corporation, 2002, 2004 * * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel * Probes initial implementation (includes suggestions from * Rusty Russell). * 2004-Aug Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with * hlists and exceptions notifier as suggested by Andi Kleen. * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes * interface to access function arguments. * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes * exceptions notifier to be first on the priority list. |
b94cce926 [PATCH] kprobes: ... |
30 31 32 |
* 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi * <prasanna@in.ibm.com> added function-return probes. |
1da177e4c Linux-2.6.12-rc2 |
33 34 |
*/ #include <linux/kprobes.h> |
1da177e4c Linux-2.6.12-rc2 |
35 36 |
#include <linux/hash.h> #include <linux/init.h> |
4e57b6817 [PATCH] fix missi... |
37 |
#include <linux/slab.h> |
e38697929 kprobes: fix spar... |
38 |
#include <linux/stddef.h> |
9984de1a5 kernel: Map most ... |
39 |
#include <linux/export.h> |
9ec4b1f35 [PATCH] kprobes: ... |
40 |
#include <linux/moduleloader.h> |
3a872d89b [PATCH] Kprobes: ... |
41 |
#include <linux/kallsyms.h> |
b4c6c34a5 [PATCH] kprobes: ... |
42 |
#include <linux/freezer.h> |
346fd59ba [PATCH] kprobes: ... |
43 44 |
#include <linux/seq_file.h> #include <linux/debugfs.h> |
b2be84df9 kprobes: Jump opt... |
45 |
#include <linux/sysctl.h> |
1eeb66a1b move die notifier... |
46 |
#include <linux/kdebug.h> |
4460fdad8 tracing, Text Edi... |
47 |
#include <linux/memory.h> |
4554dbcb8 kprobes: Check pr... |
48 |
#include <linux/ftrace.h> |
afd66255b kprobes: Introduc... |
49 |
#include <linux/cpu.h> |
bf5438fca jump label: Base ... |
50 |
#include <linux/jump_label.h> |
bf8f6e5b3 Kprobes: The ON/O... |
51 |
|
bfd45be0b kprobes: include ... |
52 |
#include <asm/sections.h> |
1da177e4c Linux-2.6.12-rc2 |
53 54 |
#include <asm/cacheflush.h> #include <asm/errno.h> |
7c0f6ba68 Replace <asm/uacc... |
55 |
#include <linux/uaccess.h> |
1da177e4c Linux-2.6.12-rc2 |
56 57 58 |
#define KPROBE_HASH_BITS 6 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS) |
3a872d89b [PATCH] Kprobes: ... |
59 |
|
ef53d9c5e kprobes: improve ... |
60 |
static int kprobes_initialized; |
1da177e4c Linux-2.6.12-rc2 |
61 |
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; |
b94cce926 [PATCH] kprobes: ... |
62 |
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; |
1da177e4c Linux-2.6.12-rc2 |
63 |
|
bf8f6e5b3 Kprobes: The ON/O... |
64 |
/* NOTE: change this value only with kprobe_mutex held */ |
e579abeb5 kprobes: rename k... |
65 |
static bool kprobes_all_disarmed; |
bf8f6e5b3 Kprobes: The ON/O... |
66 |
|
43948f502 kprobes: Remove r... |
67 68 |
/* This protects kprobe_table and optimizing_list */ static DEFINE_MUTEX(kprobe_mutex); |
e65845235 [PATCH] Kprobes: ... |
69 |
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; |
ef53d9c5e kprobes: improve ... |
70 |
static struct { |
ec484608c locking, kprobes:... |
71 |
raw_spinlock_t lock ____cacheline_aligned_in_smp; |
ef53d9c5e kprobes: improve ... |
72 |
} kretprobe_table_locks[KPROBE_TABLE_SIZE]; |
290e30707 powerpc/kprobes: ... |
73 74 |
/*
 * Default symbol-to-address resolver. Declared __weak so an architecture
 * can override it (e.g. where function descriptors must be resolved).
 */
kprobe_opcode_t * __weak kprobe_lookup_name(const char *name, unsigned int __unused)
{
	unsigned long addr = kallsyms_lookup_name(name);

	return (kprobe_opcode_t *)addr;
}
ec484608c locking, kprobes:... |
78 |
static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash) |
ef53d9c5e kprobes: improve ... |
79 80 81 |
{ return &(kretprobe_table_locks[hash].lock); } |
1da177e4c Linux-2.6.12-rc2 |
82 |
|
376e24242 kprobes: Introduc... |
83 84 |
/* Blacklist -- list of struct kprobe_blacklist_entry */ static LIST_HEAD(kprobe_blacklist); |
3d8d996e0 kprobes: prevent ... |
85 |
|
2d14e39da [PATCH] kprobes: ... |
86 |
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT |
9ec4b1f35 [PATCH] kprobes: ... |
87 88 89 90 91 92 |
/* * kprobe->ainsn.insn points to the copy of the instruction to be * single-stepped. x86_64, POWER4 and above have no-exec support and * stepping on the instruction on a vmalloced/kmalloced/data page * is a recipe for disaster */ |
9ec4b1f35 [PATCH] kprobes: ... |
93 |
/*
 * One page worth of instruction slots belonging to a kprobe_insn_cache.
 * The list linkage is RCU-safe for lock-free readers; all other fields
 * are protected by the owning cache's mutex.
 */
struct kprobe_insn_page {
	struct list_head list;		/* node on kprobe_insn_cache.pages (RCU) */
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	struct kprobe_insn_cache *cache; /* owning cache, used for ->free() */
	int nused;			/* number of slots in SLOT_USED state */
	int ngarbage;			/* number of SLOT_DIRTY slots on this page */
	char slot_used[];		/* per-slot state, enum kprobe_slot_state */
};
4610ee1d3 kprobes: Introduc... |
101 102 103 |
#define KPROBE_INSN_PAGE_SIZE(slots) \ (offsetof(struct kprobe_insn_page, slot_used) + \ (sizeof(char) * (slots))) |
4610ee1d3 kprobes: Introduc... |
104 105 106 107 |
static int slots_per_page(struct kprobe_insn_cache *c) { return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t)); } |
ab40c5c6b [PATCH] kprobes: ... |
108 109 110 111 112 |
enum kprobe_slot_state { SLOT_CLEAN = 0, SLOT_DIRTY = 1, SLOT_USED = 2, }; |
63fef14fc kprobes/x86: Make... |
113 |
/* Allocate one page for instruction slots; __weak so arch can override. */
void __weak *alloc_insn_page(void)
{
	return module_alloc(PAGE_SIZE);
}
c93f5cf57 kprobes/x86: Fix ... |
117 |
/* Release a page obtained from alloc_insn_page(); __weak so arch can override. */
void __weak free_insn_page(void *page)
{
	module_memfree(page);
}
c802d64a3 kprobes: unify in... |
121 122 |
struct kprobe_insn_cache kprobe_insn_slots = { .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex), |
af96397de kprobes: allow to... |
123 124 |
.alloc = alloc_insn_page, .free = free_insn_page, |
4610ee1d3 kprobes: Introduc... |
125 126 127 128 |
.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages), .insn_size = MAX_INSN_SIZE, .nr_garbage = 0, }; |
55479f647 kprobes: Allow pr... |
129 |
static int collect_garbage_slots(struct kprobe_insn_cache *c); |
b4c6c34a5 [PATCH] kprobes: ... |
130 |
|
9ec4b1f35 [PATCH] kprobes: ... |
131 |
/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 *
 * Returns the slot address, or NULL on allocation failure. May sleep.
 */
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;
	kprobe_opcode_t *slot = NULL;

	/* Since the slot array is not protected by rcu, we need a mutex */
	mutex_lock(&c->mutex);
 retry:
	/* First, look for a free slot on an already-allocated page. */
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;

			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					slot = kip->insns + (i * c->insn_size);
					rcu_read_unlock();
					goto out;
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}
	rcu_read_unlock();

	/* If there are any garbage slots, collect it and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		goto out;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = c->alloc();
	if (!kip->insns) {
		kfree(kip);
		goto out;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;	/* hand out the first slot of the new page */
	kip->nused = 1;
	kip->ngarbage = 0;
	kip->cache = c;
	/* Publish the page for lock-free readers. */
	list_add_rcu(&kip->list, &c->pages);
	slot = kip->insns;
out:
	mutex_unlock(&c->mutex);
	return slot;
}
b4c6c34a5 [PATCH] kprobes: ... |
194 |
/* Return 1 if all garbages are collected, otherwise 0. */
static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	/* Reclaim slot @idx on page @kip; caller holds the cache mutex. */
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use. Free it unless
		 * it's the last one. We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del_rcu(&kip->list);
			/* Wait for lock-free RCU walkers before freeing the page. */
			synchronize_rcu();
			kip->cache->free(kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}
55479f647 kprobes: Allow pr... |
216 |
/* Reclaim every SLOT_DIRTY slot in cache @c. Always returns 0. May sleep. */
static int collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no-one is interrupted on the garbages */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;

		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbages */
		for (i = 0; i < slots_per_page(c); i++) {
			/* collect_one_slot() returns 1 once it frees the whole page */
			if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}
55479f647 kprobes: Allow pr... |
236 237 |
/*
 * Give @slot back to cache @c.  If @dirty, the slot is only marked
 * SLOT_DIRTY and reclaimed later by garbage collection; otherwise it
 * is reclaimed immediately.  May sleep.
 */
void __free_insn_slot(struct kprobe_insn_cache *c,
		      kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	long idx;

	mutex_lock(&c->mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		/* Compute the slot index of @slot relative to this page. */
		idx = ((long)slot - (long)kip->insns) /
			(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c))
			goto out;
	}
	/* Could not find this slot. */
	WARN_ON(1);
	kip = NULL;
out:
	rcu_read_unlock();
	/* Mark and sweep: this may sleep */
	if (kip) {
		/* Check double free */
		WARN_ON(kip->slot_used[idx] != SLOT_USED);
		if (dirty) {
			kip->slot_used[idx] = SLOT_DIRTY;
			kip->ngarbage++;
			/* Sweep once garbage exceeds one page worth of slots. */
			if (++c->nr_garbage > slots_per_page(c))
				collect_garbage_slots(c);
		} else {
			collect_one_slot(kip, idx);
		}
	}
	mutex_unlock(&c->mutex);
}
6f716acd5 kprobes: codingst... |
270 |
|
5b485629b kprobes, extable:... |
271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 |
/* * Check given address is on the page of kprobe instruction slots. * This will be used for checking whether the address on a stack * is on a text area or not. */ bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr) { struct kprobe_insn_page *kip; bool ret = false; rcu_read_lock(); list_for_each_entry_rcu(kip, &c->pages, list) { if (addr >= (unsigned long)kip->insns && addr < (unsigned long)kip->insns + PAGE_SIZE) { ret = true; break; } } rcu_read_unlock(); return ret; } |
afd66255b kprobes: Introduc... |
293 294 |
#ifdef CONFIG_OPTPROBES /* For optimized_kprobe buffer */ |
c802d64a3 kprobes: unify in... |
295 296 |
struct kprobe_insn_cache kprobe_optinsn_slots = { .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex), |
af96397de kprobes: allow to... |
297 298 |
.alloc = alloc_insn_page, .free = free_insn_page, |
afd66255b kprobes: Introduc... |
299 300 301 302 |
.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages), /* .insn_size is initialized later */ .nr_garbage = 0, }; |
afd66255b kprobes: Introduc... |
303 |
#endif |
2d14e39da [PATCH] kprobes: ... |
304 |
#endif |
9ec4b1f35 [PATCH] kprobes: ... |
305 |
|
e65845235 [PATCH] Kprobes: ... |
306 307 308 |
/* We have preemption disabled.. so it is safe to use __ versions */

/* Record @kp as the kprobe currently being handled on this CPU. */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__this_cpu_write(kprobe_instance, kp);
}

/* Clear this CPU's current-kprobe marker once handling is done. */
static inline void reset_kprobe_instance(void)
{
	__this_cpu_write(kprobe_instance, NULL);
}
3516a4604 [PATCH] Kprobes: ... |
316 317 |
/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct kprobe *p;

	/* Hash the probed address into one of KPROBE_TABLE_SIZE buckets. */
	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, head, hlist) {
		if (p->addr == addr)
			return p;
	}

	/* No kprobe registered at @addr. */
	return NULL;
}
NOKPROBE_SYMBOL(get_kprobe);
1da177e4c Linux-2.6.12-rc2 |
336 |
|
820aede02 kprobes: Use NOKP... |
337 |
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs); |
afd66255b kprobes: Introduc... |
338 339 340 341 342 343 |
/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	/* Aggregators are identified by their dedicated pre-handler. */
	return p->pre_handler == aggr_pre_handler;
}
6274de498 kprobes: Support ... |
344 345 346 347 348 349 |
/* Return true(!0) if the kprobe is unused */
static inline int kprobe_unused(struct kprobe *p)
{
	/* An aggregator that is disabled and has no child probes left. */
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);
}
afd66255b kprobes: Introduc... |
350 351 352 |
/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* NOTE: copies FROM @ap TO @p, despite the argument order. */
	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
b2be84df9 kprobes: Jump opt... |
360 361 |
/* NOTE: change this value only with kprobe_mutex held */ static bool kprobes_allow_optimization; |
afd66255b kprobes: Introduc... |
362 363 364 365 |
/*
 * Call all pre_handler on the list, but ignores its return value.
 * This must be called from arch-dep optimized caller.
 */
void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		/* Skip disabled children; run the rest with instance tracking. */
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}
NOKPROBE_SYMBOL(opt_pre_handler);
afd66255b kprobes: Introduc... |
379 |
|
6274de498 kprobes: Support ... |
380 |
/* Free optimized instructions and optimized_kprobe */ |
55479f647 kprobes: Allow pr... |
381 |
/* Free the optimized instruction buffer, arch state, and the aggregator. */
static void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	arch_remove_kprobe(p);
	kfree(op);
}
afd66255b kprobes: Introduc... |
390 391 392 393 394 395 396 397 398 399 400 401 |
/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p))
		return 0;

	op = container_of(p, struct optimized_kprobe, kp);
	return arch_prepared_optinsn(&op->optinsn);
}
6274de498 kprobes: Support ... |
402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 |
/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
static inline int kprobe_disarmed(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* If kprobe is not aggr/opt probe, just return kprobe is disabled */
	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	op = container_of(p, struct optimized_kprobe, kp);

	/* Also require no pending (un)optimization queued for it. */
	return kprobe_disabled(p) && list_empty(&op->list);
}

/* Return true(!0) if the probe is queued on (un)optimizing lists */
55479f647 kprobes: Allow pr... |
417 |
static int kprobe_queued(struct kprobe *p) |
6274de498 kprobes: Support ... |
418 419 420 421 422 423 424 425 426 427 |
{ struct optimized_kprobe *op; if (kprobe_aggrprobe(p)) { op = container_of(p, struct optimized_kprobe, kp); if (!list_empty(&op->list)) return 1; } return 0; } |
afd66255b kprobes: Introduc... |
428 429 430 431 |
/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (exclude breakpoint).
 */
static struct kprobe *get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		/* Confirm @addr really falls inside op's replaced region. */
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}

/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);
6274de498 kprobes: Support ... |
453 |
static LIST_HEAD(unoptimizing_list); |
7b959fc58 kprobes: Fix to f... |
454 |
static LIST_HEAD(freeing_list); |
afd66255b kprobes: Introduc... |
455 456 457 458 |
static void kprobe_optimizer(struct work_struct *work); static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); #define OPTIMIZE_DELAY 5 |
61f4e13ff kprobes: Separate... |
459 460 461 462 |
/* * Optimize (replace a breakpoint with a jump) kprobes listed on * optimizing_list. */ |
55479f647 kprobes: Allow pr... |
463 |
/* Optimize (replace breakpoints with jumps) everything on optimizing_list. */
static void do_optimize_kprobes(void)
{
	/*
	 * The optimization/unoptimization refers online_cpus via
	 * stop_machine() and cpu-hotplug modifies online_cpus.
	 * And same time, text_mutex will be held in cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug try to lock
	 * text_mutex but stop_machine can not be done because online_cpus
	 * has been changed)
	 * To avoid this deadlock, caller must have locked cpu hotplug
	 * for preventing cpu-hotplug outside of text_mutex locking.
	 */
	lockdep_assert_cpus_held();

	/* Optimization never be done when disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	/* text_mutex serializes kernel text modification. */
	mutex_lock(&text_mutex);
	arch_optimize_kprobes(&optimizing_list);
	mutex_unlock(&text_mutex);
}
6274de498 kprobes: Support ... |
485 486 487 488 |
/* * Unoptimize (replace a jump with a breakpoint and remove the breakpoint * if need) kprobes listed on unoptimizing_list. */ |
55479f647 kprobes: Allow pr... |
489 |
/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if need) kprobes listed on unoptimizing_list.
 */
static void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	/* See comment in do_optimize_kprobes() */
	lockdep_assert_cpus_held();

	/* Unoptimization must be done anytime */
	if (list_empty(&unoptimizing_list))
		return;

	mutex_lock(&text_mutex);
	/* arch code moves finished entries onto freeing_list. */
	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
	/* Loop free_list for disarming */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Disarm probes if marked disabled */
		if (kprobe_disabled(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from hash list. After waiting
			 * for synchronization, these probes are reclaimed.
			 * (reclaiming is done by do_free_cleaned_kprobes.)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
	mutex_unlock(&text_mutex);
}

/* Reclaim all kprobes on the free_list */
55479f647 kprobes: Allow pr... |
518 |
static void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Everything here must already be unused (disarmed, unlinked). */
		BUG_ON(!kprobe_unused(&op->kp));
		list_del_init(&op->list);
		free_aggr_kprobe(&op->kp);
	}
}

/* Start optimizer after OPTIMIZE_DELAY passed */
55479f647 kprobes: Allow pr... |
529 |
static void kick_kprobe_optimizer(void)
{
	/* Delay lets multiple (un)optimization requests batch into one pass. */
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}
61f4e13ff kprobes: Separate... |
533 |
/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	cpus_read_lock();
	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for quiesence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for quiesence period to ensure all potentially
	 * preempted tasks to have normally scheduled. Because optprobe
	 * may modify multiple instructions, there is a chance that Nth
	 * instruction is preempted. In that case, such tasks can return
	 * to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
	 * Note that on non-preemptive kernel, this is transparently converted
	 * to synchronize_sched() to wait for all interrupts to have completed.
	 */
	synchronize_rcu_tasks();

	/* Step 3: Optimize kprobes after quiesence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after quiesence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&module_mutex);
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);

	/* Step 5: Kick optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();
}

/* Wait for completing optimization and unoptimization */
30e7d894c tracing/kprobes: ... |
574 |
void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	/* Re-check under the mutex: flushing may have queued new work. */
	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* this will also make optimizing_work execute immediately */
		flush_delayed_work(&optimizing_work);
		/* @optimizing_work might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}

/* Optimize kprobe if p is ready to be optimized */
55479f647 kprobes: Allow pr... |
593 |
static void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* kprobes with post_handler can not be optimized */
	if (p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check there is no other kprobes at the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
		return;
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	if (!list_empty(&op->list))
		/* This is under unoptimizing. Just dequeue the probe */
		list_del_init(&op->list);
	else {
		/* Queue for the optimizer work and (re)start its timer. */
		list_add(&op->list, &optimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Short cut to direct unoptimizing */
55479f647 kprobes: Allow pr... |
626 |
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	lockdep_assert_cpus_held();
	/* Restore the original instruction right away, bypassing the batch. */
	arch_unoptimize_kprobe(op);
	if (kprobe_disabled(&op->kp))
		arch_disarm_kprobe(&op->kp);
}

/* Unoptimize a kprobe if p is optimized */
55479f647 kprobes: Allow pr... |
635 |
static void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is not an optprobe nor optimized */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p)) {
		/* Unoptimized or unoptimizing case */
		if (force && !list_empty(&op->list)) {
			/*
			 * Only if this is unoptimizing kprobe and forced,
			 * forcibly unoptimize it. (No need to unoptimize
			 * unoptimized kprobe again :)
			 */
			list_del_init(&op->list);
			force_unoptimize_kprobe(op);
		}
		return;
	}

	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	if (!list_empty(&op->list)) {
		/* Dequeue from the optimization queue */
		list_del_init(&op->list);
		return;
	}

	/* Optimized kprobe case */
	if (force)
		/* Forcibly update the code: this is a special case */
		force_unoptimize_kprobe(op);
	else {
		/* Defer to the optimizer work for batched unoptimization. */
		list_add(&op->list, &unoptimizing_list);
		kick_kprobe_optimizer();
	}
}
0490cd1f9 kprobes: Reuse un... |
671 |
/* Cancel unoptimizing for reusing */ |
82b0f70fd kprobes: Return e... |
672 |
static int reuse_unused_kprobe(struct kprobe *ap) |
0490cd1f9 kprobes: Reuse un... |
673 674 |
{ struct optimized_kprobe *op; |
82b0f70fd kprobes: Return e... |
675 |
int ret; |
0490cd1f9 kprobes: Reuse un... |
676 677 678 679 680 681 682 |
BUG_ON(!kprobe_unused(ap)); /* * Unused kprobe MUST be on the way of delayed unoptimizing (means * there is still a relative jump) and disabled. */ op = container_of(ap, struct optimized_kprobe, kp); |
4458515b2 kprobes: Replace ... |
683 |
WARN_ON_ONCE(list_empty(&op->list)); |
0490cd1f9 kprobes: Reuse un... |
684 685 686 |
/* Enable the probe again */ ap->flags &= ~KPROBE_FLAG_DISABLED; /* Optimize it again (remove from op->list) */ |
82b0f70fd kprobes: Return e... |
687 688 689 |
ret = kprobe_optready(ap); if (ret) return ret; |
0490cd1f9 kprobes: Reuse un... |
690 |
optimize_kprobe(ap); |
82b0f70fd kprobes: Return e... |
691 |
return 0; |
0490cd1f9 kprobes: Reuse un... |
692 |
} |
/* Remove optimized instructions */
static void kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list))
		/* Dequeue from the (un)optimization queue */
		list_del_init(&op->list);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;

	if (kprobe_unused(p)) {
		/* Enqueue if it is unused */
		list_add(&op->list, &freeing_list);
		/*
		 * Remove unused probes from the hash list. After waiting
		 * for synchronization, this probe is reclaimed.
		 * (reclaiming is done by do_free_cleaned_kprobes().)
		 */
		hlist_del_rcu(&op->kp.hlist);
	}

	/* Don't touch the code, because it is already freed. */
	arch_remove_optimized_kprobe(op);
}
/*
 * Prepare optimized instructions for @p unless it is an ftrace-based
 * kprobe — those are dispatched through ftrace and are never jump-optimized.
 */
static inline void __prepare_optimized_kprobe(struct optimized_kprobe *op,
					      struct kprobe *p)
{
	if (kprobe_ftrace(p))
		return;

	arch_prepare_optimized_kprobe(op, p);
}
afd66255b kprobes: Introduc... |
723 |
/* Try to prepare optimized instructions */ |
55479f647 kprobes: Allow pr... |
724 |
static void prepare_optimized_kprobe(struct kprobe *p) |
afd66255b kprobes: Introduc... |
725 726 727 728 |
{ struct optimized_kprobe *op; op = container_of(p, struct optimized_kprobe, kp); |
a460246c7 kprobes: Skip pre... |
729 |
__prepare_optimized_kprobe(op, p); |
afd66255b kprobes: Introduc... |
730 |
} |
afd66255b kprobes: Introduc... |
731 |
/* Allocate new optimized_kprobe and try to prepare optimized instructions */ |
55479f647 kprobes: Allow pr... |
732 |
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) |
afd66255b kprobes: Introduc... |
733 734 735 736 737 738 739 740 741 |
{ struct optimized_kprobe *op; op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL); if (!op) return NULL; INIT_LIST_HEAD(&op->list); op->kp.addr = p->addr; |
a460246c7 kprobes: Skip pre... |
742 |
__prepare_optimized_kprobe(op, p); |
afd66255b kprobes: Introduc... |
743 744 745 |
return &op->kp; } |
55479f647 kprobes: Allow pr... |
746 |
static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p); |
/*
 * Prepare an optimized_kprobe and optimize it
 * NOTE: p must be a normal registered kprobe
 *
 * Lock ordering here is cpus_read_lock -> jump_label_lock -> text_mutex;
 * the jump label lock is taken because arch preparation ends up calling
 * jump_label_text_reserved() (see comment below).
 */
static void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	/* Impossible to optimize ftrace-based kprobe */
	if (kprobe_ftrace(p))
		return;

	/* For preparing optimization, jump_label_text_reserved() is called */
	cpus_read_lock();
	jump_label_lock();
	mutex_lock(&text_mutex);

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		goto out;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If failed to setup optimizing, fallback to kprobe */
		arch_remove_optimized_kprobe(op);
		kfree(op);
		goto out;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);	/* This just kicks optimizer thread */

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();
}
#ifdef CONFIG_SYSCTL
/*
 * Globally allow optimization and (re)optimize every enabled kprobe in the
 * hash table. Serialized against concurrent (un)optimize_all calls by
 * kprobe_mutex; the hash walk itself is RCU-safe.
 */
static void optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		goto out;

	cpus_read_lock();
	kprobes_allow_optimization = true;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	cpus_read_unlock();
	printk(KERN_INFO "Kprobes globally optimized\n");
out:
	mutex_unlock(&kprobe_mutex);
}
/*
 * Globally forbid optimization and queue every probe for unoptimizing.
 * The wait for the optimizer thread happens AFTER dropping kprobe_mutex,
 * since the optimizer work itself takes kprobe_mutex (deadlock otherwise).
 */
static void unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	cpus_read_lock();
	kprobes_allow_optimization = false;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p, false);
		}
	}
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);

	/* Wait for unoptimizing completion */
	wait_for_kprobe_optimizer();
	printk(KERN_INFO "Kprobes globally unoptimized\n");
}
/* Serializes concurrent writers of the sysctl knob below. */
static DEFINE_MUTEX(kprobe_sysctl_mutex);
int sysctl_kprobes_optimization;

/*
 * /proc/sys handler toggling global kprobe optimization. Reads report the
 * current kprobes_allow_optimization state; writes switch it via
 * (un)optimize_all_kprobes().
 */
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void __user *buffer, size_t *length,
				      loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_sysctl_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_sysctl_mutex);

	return ret;
}
#endif /* CONFIG_SYSCTL */
/* Put a breakpoint for a probe. Must be called with text_mutex locked */
static void __arm_kprobe(struct kprobe *p)
{
	struct kprobe *_p;

	/* Check collision with other optimized kprobes */
	_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(_p))
		/* Fallback to unoptimized kprobe */
		unoptimize_kprobe(_p, true);

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}
/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
static void __disarm_kprobe(struct kprobe *p, bool reopt)
{
	struct kprobe *_p;

	/* Try to unoptimize */
	unoptimize_kprobe(p, kprobes_all_disarmed);

	/* kprobe_queued() means the unoptimizer still owns the text here. */
	if (!kprobe_queued(p)) {
		arch_disarm_kprobe(p);
		/* If another kprobe was blocked, optimize it. */
		_p = get_optimized_kprobe((unsigned long)p->addr);
		if (unlikely(_p) && reopt)
			optimize_kprobe(_p);
	}
	/* TODO: reoptimize others after unoptimized this probe */
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
/* No-op stand-ins used when jump optimization is not configured. */
#define unoptimize_kprobe(p, f)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
#define kprobe_disarmed(p)			kprobe_disabled(p)
#define wait_for_kprobe_optimizer()		do {} while (0)
/* !CONFIG_OPTPROBES stub: an unused aggregator can never exist here. */
static int reuse_unused_kprobe(struct kprobe *ap)
{
	/*
	 * If the optimized kprobe is NOT supported, the aggr kprobe is
	 * released at the same time that the last aggregated kprobe is
	 * unregistered.
	 * Thus there should be no chance to reuse unused kprobe.
	 */
	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
	return -EINVAL;
}
/* Release an aggregator: free the arch insn slot, then the struct itself. */
static void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}
/* Without optprobes, an aggregator is just a plain zero-initialized kprobe. */
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */
#ifdef CONFIG_KPROBES_ON_FTRACE
/*
 * Shared ftrace_ops for all ftrace-based kprobes. SAVE_REGS gives the
 * handler a full pt_regs; IPMODIFY reserves the right to change the
 * instruction pointer (which also makes ftrace reject filter-less use).
 */
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};
/* Number of currently armed ftrace-based kprobes (kprobe_mutex protected). */
static int kprobe_ftrace_enabled;

/* Must ensure p->addr is really on ftrace */
/* Must ensure p->addr is really on ftrace */
static int prepare_kprobe(struct kprobe *p)
{
	return kprobe_ftrace(p) ? arch_prepare_kprobe_ftrace(p)
				: arch_prepare_kprobe(p);
}
/* Caller must lock kprobe_mutex */
static int arm_kprobe_ftrace(struct kprobe *p)
{
	int ret = 0;

	/* Add this address to the shared ops' filter before registering. */
	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 0, 0);
	if (ret) {
		pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
			 p->addr, ret);
		return ret;
	}

	/* First armed ftrace-kprobe: register the shared ftrace_ops. */
	if (kprobe_ftrace_enabled == 0) {
		ret = register_ftrace_function(&kprobe_ftrace_ops);
		if (ret) {
			pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
			goto err_ftrace;
		}
	}

	kprobe_ftrace_enabled++;
	return ret;

err_ftrace:
	/*
	 * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a
	 * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental
	 * empty filter_hash which would undesirably trace all functions.
	 */
	ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
	return ret;
}
/* Caller must lock kprobe_mutex */
static int disarm_kprobe_ftrace(struct kprobe *p)
{
	int ret = 0;

	/* Last armed ftrace-kprobe: unregister the shared ftrace_ops. */
	if (kprobe_ftrace_enabled == 1) {
		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
		if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
			return ret;
	}

	kprobe_ftrace_enabled--;

	/* Drop this address from the filter (third arg 1 == remove). */
	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
			   (unsigned long)p->addr, 1, 0);
	WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
		  p->addr, ret);
	return ret;
}
#else	/* !CONFIG_KPROBES_ON_FTRACE */
/* Without ftrace support, arming an ftrace-kprobe is simply unsupported. */
#define prepare_kprobe(p)	arch_prepare_kprobe(p)
#define arm_kprobe_ftrace(p)	(-ENODEV)
#define disarm_kprobe_ftrace(p)	(-ENODEV)
#endif
/* Arm a kprobe with text_mutex */
static int arm_kprobe(struct kprobe *kp)
{
	/* ftrace-based probes are armed through ftrace, no text patching. */
	if (unlikely(kprobe_ftrace(kp)))
		return arm_kprobe_ftrace(kp);

	/* Lock order: cpus_read_lock before text_mutex. */
	cpus_read_lock();
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	return 0;
}
} /* Disarm a kprobe with text_mutex */ |
297f9233b kprobes: Propagat... |
1014 |
static int disarm_kprobe(struct kprobe *kp, bool reopt) |
201517a7f kprobes: fix to u... |
1015 |
{ |
297f9233b kprobes: Propagat... |
1016 1017 |
if (unlikely(kprobe_ftrace(kp))) return disarm_kprobe_ftrace(kp); |
2d1e38f56 kprobes: Cure hot... |
1018 1019 |
cpus_read_lock(); |
201517a7f kprobes: fix to u... |
1020 |
mutex_lock(&text_mutex); |
ae6aa16fd kprobes: introduc... |
1021 |
__disarm_kprobe(kp, reopt); |
201517a7f kprobes: fix to u... |
1022 |
mutex_unlock(&text_mutex); |
2d1e38f56 kprobes: Cure hot... |
1023 |
cpus_read_unlock(); |
297f9233b kprobes: Propagat... |
1024 1025 |
return 0; |
201517a7f kprobes: fix to u... |
1026 |
} |
/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			/* Record which child probe is running on this CPU. */
			set_kprobe_instance(kp);
			/* A nonzero pre_handler return stops the walk. */
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}
NOKPROBE_SYMBOL(aggr_pre_handler);
/* Invoke every enabled child probe's post_handler in list order. */
static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
			      unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			/* Record which child probe is running on this CPU. */
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}
NOKPROBE_SYMBOL(aggr_post_handler);
64f562c6d [PATCH] kprobes: ... |
1059 |
|
820aede02 kprobes: Use NOKP... |
1060 1061 |
static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr) |
64f562c6d [PATCH] kprobes: ... |
1062 |
{ |
b76834bc1 kprobes: Use this... |
1063 |
struct kprobe *cur = __this_cpu_read(kprobe_instance); |
e65845235 [PATCH] Kprobes: ... |
1064 |
|
64f562c6d [PATCH] kprobes: ... |
1065 1066 1067 1068 |
/* * if we faulted "during" the execution of a user specified * probe handler, invoke just that probe's fault handler */ |
e65845235 [PATCH] Kprobes: ... |
1069 1070 |
if (cur && cur->fault_handler) { if (cur->fault_handler(cur, regs, trapnr)) |
64f562c6d [PATCH] kprobes: ... |
1071 1072 1073 1074 |
return 1; } return 0; } |
820aede02 kprobes: Use NOKP... |
1075 |
NOKPROBE_SYMBOL(aggr_fault_handler); |
64f562c6d [PATCH] kprobes: ... |
1076 |
|
bf8d5c52c [PATCH] kprobes: ... |
1077 |
/* Walks the list and increments nmissed count for multiprobe case */ |
820aede02 kprobes: Use NOKP... |
1078 |
void kprobes_inc_nmissed_count(struct kprobe *p) |
bf8d5c52c [PATCH] kprobes: ... |
1079 1080 |
{ struct kprobe *kp; |
afd66255b kprobes: Introduc... |
1081 |
if (!kprobe_aggrprobe(p)) { |
bf8d5c52c [PATCH] kprobes: ... |
1082 1083 1084 1085 1086 1087 1088 |
p->nmissed++; } else { list_for_each_entry_rcu(kp, &p->list, list) kp->nmissed++; } return; } |
820aede02 kprobes: Use NOKP... |
1089 |
NOKPROBE_SYMBOL(kprobes_inc_nmissed_count); |
bf8d5c52c [PATCH] kprobes: ... |
1090 |
|
/*
 * Return a fired kretprobe instance either to its owner's free list or,
 * if the kretprobe is being unregistered (ri->rp == NULL), to @head for
 * the caller to free.
 */
void recycle_rp_inst(struct kretprobe_instance *ri,
		     struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		raw_spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		raw_spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}
NOKPROBE_SYMBOL(recycle_rp_inst);
b94cce926 [PATCH] kprobes: ... |
1107 |
|
/*
 * Look up and lock the per-task kretprobe instance bucket. On return,
 * *head points at the bucket and the bucket lock is held with IRQs saved
 * in *flags; pair with kretprobe_hash_unlock().
 */
void kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
__acquires(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_hash_lock);
ef53d9c5e kprobes: improve ... |
1120 |
|
/* Lock a kretprobe hash bucket by index; IRQ state is saved in *flags. */
static void kretprobe_table_lock(unsigned long hash,
				 unsigned long *flags)
__acquires(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_table_lock);
ef53d9c5e kprobes: improve ... |
1129 |
|
/* Release the per-task bucket lock taken by kretprobe_hash_lock(). */
void kretprobe_hash_unlock(struct task_struct *tsk,
			   unsigned long *flags)
__releases(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_hash_unlock);
ef53d9c5e kprobes: improve ... |
1141 |
|
/* Release a bucket lock taken by kretprobe_table_lock(). */
static void kretprobe_table_unlock(unsigned long hash,
				   unsigned long *flags)
__releases(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_table_unlock);
b94cce926 [PATCH] kprobes: ... |
1150 |
|
/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot. kretprobe_table_locks not yet initialized. */
		return;

	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	/* Move this task's leftover instances onto the local empty_rp list. */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	/* Free the orphaned (owner-less) instances outside the bucket lock. */
	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
NOKPROBE_SYMBOL(kprobe_flush_task);
b94cce926 [PATCH] kprobes: ... |
1182 |
|
/* Free every pre-allocated instance left on this kretprobe's free list. */
static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *next;

	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
/*
 * Detach all in-flight instances from @rp (so recycle_rp_inst() will treat
 * them as "unregistering") and free the idle ones. Called on unregister.
 */
static void cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}
NOKPROBE_SYMBOL(cleanup_rp_inst);
4a296e07c kprobes: add (un)... |
1212 |
|
/* Add the new probe to ap->list */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));

	/*
	 * A post_handler is incompatible with jump optimization, so fall
	 * the aggregator back to a normal breakpoint probe first.
	 */
	if (p->post_handler)
		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */

	list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care the kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	/* Link p under ap, then atomically swap ap into p's hash slot. */
	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = orig_p;

	/* Lock order: cpus_read_lock -> jump_label_lock -> text_mutex. */
	cpus_read_lock();

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	mutex_lock(&text_mutex);

	if (!kprobe_aggrprobe(orig_p)) {
		/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
		ap = alloc_aggr_kprobe(orig_p);
		if (!ap) {
			ret = -ENOMEM;
			goto out;
		}
		init_aggr_kprobe(ap, orig_p);
	} else if (kprobe_unused(ap)) {
		/* This probe is going to die. Rescue it */
		ret = reuse_unused_kprobe(ap);
		if (ret)
			goto out;
	}

	if (kprobe_gone(ap)) {
		/*
		 * Attempting to insert new probe at the same location that
		 * had a probe in the module vaddr area which already
		 * freed. So, the instruction slot has already been
		 * released. We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if fail to allocate new slot, don't need to
			 * free aggr_probe. It will be used next time, or
			 * freed by unregister_kprobe.
			 */
			goto out;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear gone flag to prevent allocating new slot again, and
		 * set disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy ap's insn slot to p */
	copy_kprobe(ap, p);
	ret = add_new_kprobe(ap, p);

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();

	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed) {
			/* Arm the breakpoint again. */
			ret = arm_kprobe(ap);
			if (ret) {
				/*
				 * Arming failed: roll back — re-disable the
				 * aggregator and unlink p, waiting an RCU
				 * grace period before the caller may free it.
				 */
				ap->flags |= KPROBE_FLAG_DISABLED;
				list_del_rcu(&p->list);
				synchronize_sched();
			}
		}
	}
	return ret;
}
/* Weak default; architectures may override with additional forbidden ranges. */
bool __weak arch_within_kprobe_blacklist(unsigned long addr)
{
	/* The __kprobes marked functions and entry code must not be probed */
	return addr >= (unsigned long)__kprobes_text_start &&
	       addr < (unsigned long)__kprobes_text_end;
}
/*
 * Return true if @addr must not be probed: either the architecture
 * blacklist covers it, or it lies in a [start_addr, end_addr) range of
 * the kprobe_blacklist list.
 */
bool within_kprobe_blacklist(unsigned long addr)
{
	struct kprobe_blacklist_entry *ent;

	if (arch_within_kprobe_blacklist(addr))
		return true;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	list_for_each_entry(ent, &kprobe_blacklist, list) {
		if (addr >= ent->start_addr && addr < ent->end_addr)
			return true;
	}

	return false;
}
/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 * This returns encoded errors if it fails to look up symbol or invalid
 * combination of parameters.
 *
 * Exactly one of @addr and @symbol_name must be non-NULL. On success the
 * resolved address plus @offset is returned; on failure an ERR_PTR
 * (-ENOENT when the symbol is unknown, -EINVAL otherwise).
 */
static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
			const char *symbol_name, unsigned int offset)
{
	/* Reject "both given" and "neither given" combinations */
	if ((symbol_name && addr) || (!symbol_name && !addr))
		goto invalid;

	if (symbol_name) {
		addr = kprobe_lookup_name(symbol_name, offset);
		if (!addr)
			return ERR_PTR(-ENOENT);
	}

	addr = (kprobe_opcode_t *)(((char *)addr) + offset);
	if (addr)
		return addr;

invalid:
	return ERR_PTR(-EINVAL);
}
/* Resolve the probe address of @p from its addr/symbol_name/offset triple. */
static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{
	return _kprobe_addr(p->addr, p->symbol_name, p->offset);
}
/* Check passed kprobe is valid and return kprobe in kprobe_table. */
static struct kprobe *__get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	/* Whatever is registered at p->addr; may be an aggrprobe */
	ap = get_kprobe(p->addr);
	if (unlikely(!ap))
		return NULL;

	if (p != ap) {
		/* p may be a child hanging off an aggrprobe's list */
		list_for_each_entry_rcu(list_p, &ap->list, list)
			if (list_p == p)
			/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return ap;
}

/* Return error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);
	if (__get_valid_kprobe(p))
		/* Already registered: refuse double registration */
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);

	return ret;
}
/*
 * Default (weak) check whether @p->addr collides with an ftrace patch
 * site. With CONFIG_KPROBES_ON_FTRACE, a probe exactly on the ftrace
 * location is marked KPROBE_FLAG_FTRACE; without it such addresses are
 * rejected.
 */
int __weak arch_check_ftrace_location(struct kprobe *p)
{
	unsigned long ftrace_addr;

	ftrace_addr = ftrace_location((unsigned long)p->addr);
	if (ftrace_addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		/* Given address is not on the instruction boundary */
		if ((unsigned long)p->addr != ftrace_addr)
			return -EILSEQ;
		p->flags |= KPROBE_FLAG_FTRACE;
#else	/* !CONFIG_KPROBES_ON_FTRACE */
		return -EINVAL;
#endif
	}
	return 0;
}

/*
 * Validate that @p->addr may be probed: kernel text, not blacklisted,
 * not reserved by jump labels. If the address lies in a module, a
 * reference is taken on that module and returned through *probed_mod
 * (NULL for core kernel text); the caller must module_put() it after
 * the text modification is done.
 */
static int check_kprobe_address_safe(struct kprobe *p,
				     struct module **probed_mod)
{
	int ret;

	ret = arch_check_ftrace_location(p);
	if (ret)
		return ret;
	jump_label_lock();
	preempt_disable();

	/* Ensure it is not in reserved area nor out of text */
	if (!kernel_text_address((unsigned long) p->addr) ||
	    within_kprobe_blacklist((unsigned long) p->addr) ||
	    jump_label_text_reserved(p->addr, p->addr)) {
		ret = -EINVAL;
		goto out;
	}

	/* Check if are we probing a module */
	*probed_mod = __module_text_address((unsigned long) p->addr);
	if (*probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(*probed_mod))) {
			ret = -ENOENT;
			goto out;
		}

		/*
		 * If the module freed .init.text, we couldn't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
		    (*probed_mod)->state != MODULE_STATE_COMING) {
			module_put(*probed_mod);
			*probed_mod = NULL;
			ret = -ENOENT;
		}
	}
out:
	preempt_enable();
	jump_label_unlock();

	return ret;
}
/*
 * register_kprobe() - arm a probe at p->addr (or symbol_name+offset).
 *
 * Resolves and validates the address, serializes against concurrent
 * (un)registration with kprobe_mutex, aggregates with any probe already
 * registered at the same address, otherwise inserts the probe into the
 * hash table, arms it (unless disabled) and tries to optimize it.
 * Returns 0 on success or a negative errno.
 */
int register_kprobe(struct kprobe *p)
{
	int ret;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	/* Adjust probe address from symbol */
	addr = kprobe_addr(p);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;
	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);

	/* On success this pins the probed module in probed_mod (or NULL) */
	ret = check_kprobe_address_safe(p, &probed_mod);
	if (ret)
		return ret;

	mutex_lock(&kprobe_mutex);

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize old_p, locking text_mutex. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	cpus_read_lock();
	/* Prevent text modification */
	mutex_lock(&text_mutex);
	ret = prepare_kprobe(p);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
		ret = arm_kprobe(p);
		if (ret) {
			/* Roll back the hash insertion on arming failure */
			hlist_del_rcu(&p->hlist);
			synchronize_sched();
			goto out;
		}
	}

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
1da177e4c Linux-2.6.12-rc2 |
1542 |
|
/* Check if all probes on the aggrprobe are disabled */
static int aggr_kprobe_disabled(struct kprobe *ap)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &ap->list, list)
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this ap.
			 */
			return 0;

	return 1;
}

/*
 * Disable one kprobe: Make sure called under kprobe_mutex is locked.
 * Returns the probe registered in kprobe_table (the aggrprobe if @p is
 * a child) on success, or an ERR_PTR on failure.
 */
static struct kprobe *__disable_kprobe(struct kprobe *p)
{
	struct kprobe *orig_p;
	int ret;

	/* Get an original kprobe for return */
	orig_p = __get_valid_kprobe(p);
	if (unlikely(orig_p == NULL))
		return ERR_PTR(-EINVAL);

	if (!kprobe_disabled(p)) {
		/* Disable probe if it is a child probe */
		if (p != orig_p)
			p->flags |= KPROBE_FLAG_DISABLED;

		/* Try to disarm and disable this/parent probe */
		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
			/*
			 * If kprobes_all_disarmed is set, orig_p
			 * should have already been disarmed, so
			 * skip unneed disarming process.
			 */
			if (!kprobes_all_disarmed) {
				ret = disarm_kprobe(orig_p, true);
				if (ret) {
					/* Undo the child's disabled flag */
					p->flags &= ~KPROBE_FLAG_DISABLED;
					return ERR_PTR(ret);
				}
			}
			orig_p->flags |= KPROBE_FLAG_DISABLED;
		}
	}

	return orig_p;
}
/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	/* Disable kprobe. This will disarm it if needed. */
	ap = __disable_kprobe(p);
	if (IS_ERR(ap))
		return PTR_ERR(ap);

	if (ap == p)
		/*
		 * This probe is an independent(and non-optimized) kprobe
		 * (not an aggrprobe). Remove from the hash list.
		 */
		goto disarmed;

	/* Following process expects this probe is an aggrprobe */
	WARN_ON(!kprobe_aggrprobe(ap));

	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
		/*
		 * !disarmed could be happen if the probe is under delayed
		 * unoptimizing.
		 */
		goto disarmed;
	else {
		/* If disabling probe has special handlers, update aggrprobe */
		if (p->post_handler && !kprobe_gone(p)) {
			/* Only clear if no other child needs a post_handler */
			list_for_each_entry_rcu(list_p, &ap->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			ap->post_handler = NULL;
		}
noclean:
		/*
		 * Remove from the aggrprobe: this path will do nothing in
		 * __unregister_kprobe_bottom().
		 */
		list_del_rcu(&p->list);
		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
			/*
			 * Try to optimize this probe again, because post
			 * handler may have been changed.
			 */
			optimize_kprobe(ap);
	}
	return 0;

disarmed:
	BUG_ON(!kprobe_disarmed(ap));
	hlist_del_rcu(&ap->hlist);
	return 0;
}
3516a4604 [PATCH] Kprobes: ... |
1651 |
|
/*
 * Second half of unregistration: free per-probe resources after the
 * caller has done the scheduler synchronization. A probe that is still
 * linked on an aggrprobe's list (i.e. neither empty nor singular) needs
 * no work here; it was detached in __unregister_kprobe_top().
 */
static void __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *ap;

	if (list_empty(&p->list))
		/* This is an independent kprobe */
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* This is the last child of an aggrprobe */
		ap = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		free_aggr_kprobe(ap);
	}
	/* Otherwise, do nothing. */
}
55479f647 kprobes: Allow pr... |
1667 |
int register_kprobes(struct kprobe **kps, int num) |
9861668f7 kprobes: add (un)... |
1668 1669 1670 1671 1672 1673 |
{ int i, ret = 0; if (num <= 0) return -EINVAL; for (i = 0; i < num; i++) { |
49ad2fd76 kprobes: remove c... |
1674 |
ret = register_kprobe(kps[i]); |
67dddaad5 kprobes: fix erro... |
1675 1676 1677 |
if (ret < 0) { if (i > 0) unregister_kprobes(kps, i); |
9861668f7 kprobes: add (un)... |
1678 |
break; |
367216567 [PATCH] Kprobe: m... |
1679 |
} |
49a2a1b83 [PATCH] kprobes: ... |
1680 |
} |
9861668f7 kprobes: add (un)... |
1681 1682 |
return ret; } |
99081ab55 kprobes: move EXP... |
1683 |
EXPORT_SYMBOL_GPL(register_kprobes); |
9861668f7 kprobes: add (un)... |
1684 |
|
/* Unregister a single kprobe; wrapper around unregister_kprobes(). */
void unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);
9861668f7 kprobes: add (un)... |
1690 |
|
/*
 * Unregister a batch of kprobes: detach them all under kprobe_mutex,
 * wait a scheduler grace period so no CPU is still inside a handler,
 * then free their resources. A probe whose detach failed is flagged by
 * a NULL addr and skipped in the second phase.
 */
void unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);
1da177e4c Linux-2.6.12-rc2 |
1709 |
|
/*
 * Default (weak) exceptions notifier: the generic version ignores the
 * notification; architectures that need to hook the notifier chain
 * override it.
 */
int __weak kprobe_exceptions_notify(struct notifier_block *self,
					unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};
/*
 * Default (weak) mapping from a function entry pointer to its address;
 * the identity here — architectures may override (presumably where an
 * entry pointer is not the text address itself).
 */
unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}
1da177e4c Linux-2.6.12-rc2 |
1725 |
|
9edddaa20 Kprobes: indicate... |
1726 |
#ifdef CONFIG_KRETPROBES |
/*
 * This kprobe pre_handler is registered with every kretprobe. When probe
 * hits it will set up the return probe.
 */
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/*
	 * To avoid deadlocks, prohibit return probing in NMI contexts,
	 * just skip the probe and increase the (inexact) 'nmissed'
	 * statistical counter, so that the user is informed that
	 * something happened:
	 */
	if (unlikely(in_nmi())) {
		rp->nmissed++;
		return 0;
	}

	/* TODO: consider to only swap the RA after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	raw_spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		/* Take one instance off the free list */
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		raw_spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
			/* Entry handler declined: put the instance back */
			raw_spin_lock_irqsave(&rp->lock, flags);
			hlist_add_head(&ri->hlist, &rp->free_instances);
			raw_spin_unlock_irqrestore(&rp->lock, flags);
			return 0;
		}

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		/* No free instance: account the miss */
		rp->nmissed++;
		raw_spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);
e65cefe87 [PATCH] kernel/kp... |
1781 |
|
/* Default: only offset 0 counts as function entry; arch may override. */
bool __weak arch_kprobe_on_func_entry(unsigned long offset)
{
	return !offset;
}

/*
 * Return true if the addr (or sym+offset) combination resolves to the
 * entry of a function, as determined by kallsyms and the architecture.
 */
bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
{
	kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);

	if (IS_ERR(kp_addr))
		return false;

	if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
						!arch_kprobe_on_func_entry(offset))
		return false;

	return true;
}
/*
 * register_kretprobe() - install a function-return probe.
 *
 * The probe point must be a function entry and must not be on the
 * kretprobe blacklist. Pre-allocates rp->maxactive instances (with a
 * CPU-count-based default when unset) and then registers the underlying
 * entry kprobe. Returns 0 on success or a negative errno.
 */
int register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
		return -EINVAL;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (IS_ERR(addr))
			return PTR_ERR(addr);

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	/* The kretprobe machinery owns the entry kprobe's handlers */
	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	raw_spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			/* Frees all instances allocated so far */
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
b94cce926 [PATCH] kprobes: ... |
1853 |
|
/*
 * Register a batch of kretprobes; on the first failure every previously
 * registered probe in the batch is rolled back and the error returned.
 */
int register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);
4a296e07c kprobes: add (un)... |
1871 |
|
/* Unregister a single kretprobe; wrapper around unregister_kretprobes(). */
void unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);
4a296e07c kprobes: add (un)... |
1877 |
|
/*
 * Batch unregistration of kretprobes: detach under kprobe_mutex, wait a
 * scheduler grace period, then free per-probe state including the
 * kretprobe instances.
 */
void unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);
4a296e07c kprobes: add (un)... |
1899 |
|
9edddaa20 Kprobes: indicate... |
1900 |
#else /* CONFIG_KRETPROBES */ |
/* Stubs: kretprobes are unavailable without CONFIG_KRETPROBES. */
int register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

/* No-op pre-handler so a kretprobe's kp can still be hit harmlessly */
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);
b94cce926 [PATCH] kprobes: ... |
1927 |
|
4a296e07c kprobes: add (un)... |
1928 |
#endif /* CONFIG_KRETPROBES */ |
/* Set the kprobe gone and remove its instruction buffer. */
static void kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (kprobe_aggrprobe(p)) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		kill_optimized_kprobe(p);
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}
/* Disable one kprobe */
int disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Disable this kprobe */
	p = __disable_kprobe(kp);
	if (IS_ERR(p))
		ret = PTR_ERR(p);

	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone, we couldn't enable it. */
		ret = -EINVAL;
		goto out;
	}

	/* Clear the child's disabled flag when kp hangs off an aggrprobe */
	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;

	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
		p->flags &= ~KPROBE_FLAG_DISABLED;
		ret = arm_kprobe(p);
		if (ret)
			/* Arming failed: keep the probe disabled */
			p->flags |= KPROBE_FLAG_DISABLED;
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
4458515b2 kprobes: Replace ... |
2003 |
/* Caller must NOT call this in usual path. This is only for critical case */ |
820aede02 kprobes: Use NOKP... |
2004 |
void dump_kprobe(struct kprobe *kp) |
24851d244 tracing/kprobes: ... |
2005 |
{ |
4458515b2 kprobes: Replace ... |
2006 2007 2008 2009 2010 2011 2012 |
pr_err("Dumping kprobe: "); pr_err("Name: %s Offset: %x Address: %pS ", kp->symbol_name, kp->offset, kp->addr); |
24851d244 tracing/kprobes: ... |
2013 |
} |
820aede02 kprobes: Use NOKP... |
2014 |
NOKPROBE_SYMBOL(dump_kprobe); |
24851d244 tracing/kprobes: ... |
2015 |
|
/*
 * Lookup and populate the kprobe_blacklist.
 *
 * Unlike the kretprobe blacklist, we'll need to determine
 * the range of addresses that belong to the said functions,
 * since a kprobe need not necessarily be at the beginning
 * of a function.
 */
static int __init populate_kprobe_blacklist(unsigned long *start,
					     unsigned long *end)
{
	unsigned long *iter;
	struct kprobe_blacklist_entry *ent;
	unsigned long entry, offset = 0, size = 0;

	for (iter = start; iter < end; iter++) {
		entry = arch_deref_entry_point((void *)*iter);

		/* Skip entries that cannot be resolved to a text symbol */
		if (!kernel_text_address(entry) ||
		    !kallsyms_lookup_size_offset(entry, &size, &offset))
			continue;

		ent = kmalloc(sizeof(*ent), GFP_KERNEL);
		if (!ent)
			return -ENOMEM;
		/* Blacklist the whole function containing the entry point */
		ent->start_addr = entry;
		ent->end_addr = entry + size;
		INIT_LIST_HEAD(&ent->list);
		list_add_tail(&ent->list, &kprobe_blacklist);
	}
	return 0;
}
/* Module notifier call back, checking kprobes on the module */
static int kprobes_module_callback(struct notifier_block *nb,
				   unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;
	/* GOING frees .text too; LIVE only frees .init.text */
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING was notified, both of module .text and
	 * .init.text sections would be freed. When MODULE_STATE_LIVE was
	 * notified, only .init.text section would be freed. We need to
	 * disable kprobes which have been inserted in the sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed will soon
				 * be vfreed buy not synced to disk. Hence,
				 * disarming the breakpoint isn't needed.
				 *
				 * Note, this will also move any optimized probes
				 * that are pending to be removed from their
				 * corresponding lists to the freeing_list and
				 * will not be touched by the delayed
				 * kprobe_optimizer work handler.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};
376e24242 kprobes: Introduc... |
2096 2097 2098 |
/* Markers of _kprobe_blacklist section */ extern unsigned long __start_kprobe_blacklist[]; extern unsigned long __stop_kprobe_blacklist[]; |
1da177e4c Linux-2.6.12-rc2 |
2099 2100 2101 2102 2103 2104 |
static int __init init_kprobes(void) { int i, err = 0; /* FIXME allocate the probe table, currently defined statically */ /* initialize all list heads */ |
b94cce926 [PATCH] kprobes: ... |
2105 |
for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
1da177e4c Linux-2.6.12-rc2 |
2106 |
INIT_HLIST_HEAD(&kprobe_table[i]); |
b94cce926 [PATCH] kprobes: ... |
2107 |
INIT_HLIST_HEAD(&kretprobe_inst_table[i]); |
ec484608c locking, kprobes:... |
2108 |
raw_spin_lock_init(&(kretprobe_table_locks[i].lock)); |
b94cce926 [PATCH] kprobes: ... |
2109 |
} |
1da177e4c Linux-2.6.12-rc2 |
2110 |
|
376e24242 kprobes: Introduc... |
2111 2112 2113 2114 2115 2116 2117 |
err = populate_kprobe_blacklist(__start_kprobe_blacklist, __stop_kprobe_blacklist); if (err) { pr_err("kprobes: failed to populate blacklist: %d ", err); pr_err("Please take care of using kprobes. "); |
3d8d996e0 kprobes: prevent ... |
2118 |
} |
f438d914b kprobes: support ... |
2119 2120 2121 |
if (kretprobe_blacklist_size) { /* lookup the function address from its name */ for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { |
49e0b4658 kprobes: Convert ... |
2122 |
kretprobe_blacklist[i].addr = |
290e30707 powerpc/kprobes: ... |
2123 |
kprobe_lookup_name(kretprobe_blacklist[i].name, 0); |
f438d914b kprobes: support ... |
2124 2125 2126 2127 2128 2129 |
if (!kretprobe_blacklist[i].addr) printk("kretprobe: lookup failed: %s ", kretprobe_blacklist[i].name); } } |
b2be84df9 kprobes: Jump opt... |
2130 2131 |
#if defined(CONFIG_OPTPROBES) #if defined(__ARCH_WANT_KPROBES_INSN_SLOT) |
afd66255b kprobes: Introduc... |
2132 2133 2134 |
/* Init kprobe_optinsn_slots */ kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE; #endif |
b2be84df9 kprobes: Jump opt... |
2135 2136 2137 |
/* By default, kprobes can be optimized */ kprobes_allow_optimization = true; #endif |
afd66255b kprobes: Introduc... |
2138 |
|
e579abeb5 kprobes: rename k... |
2139 2140 |
/* By default, kprobes are armed */ kprobes_all_disarmed = false; |
bf8f6e5b3 Kprobes: The ON/O... |
2141 |
|
6772926be [PATCH] kprobes: ... |
2142 |
err = arch_init_kprobes(); |
802eae7c8 [PATCH] Return pr... |
2143 2144 |
if (!err) err = register_die_notifier(&kprobe_exceptions_nb); |
e8386a0cb kprobes: support ... |
2145 2146 |
if (!err) err = register_module_notifier(&kprobe_module_nb); |
ef53d9c5e kprobes: improve ... |
2147 |
kprobes_initialized = (err == 0); |
802eae7c8 [PATCH] Return pr... |
2148 |
|
8c1c93564 x86: kprobes: add... |
2149 2150 |
if (!err) init_test_probes(); |
1da177e4c Linux-2.6.12-rc2 |
2151 2152 |
return err; } |
346fd59ba [PATCH] kprobes: ... |
2153 |
#ifdef CONFIG_DEBUG_FS
/*
 * Print one line for a probe into the debugfs "list" seq_file:
 *   <addr>  <type>  <symbol+offset|%pS>  [module] [flags]\n
 * @pp is the enclosing aggregate probe (or NULL), used for the
 * OPTIMIZED/FTRACE flags which live on the aggregate.
 */
static void report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname, struct kprobe *pp)
{
	char *kprobe_type;
	void *addr = p->addr;

	/* "r" = kretprobe, "k" = plain kprobe. */
	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else
		kprobe_type = "k";

	/* Hide raw kernel addresses from unprivileged readers. */
	if (!kallsyms_show_value())
		addr = NULL;

	if (sym)
		seq_printf(pi, "%px  %s  %s+0x%x  %s",
			addr, kprobe_type, sym, offset,
			(modname ? modname : " "));
	else	/* try to use %pS */
		seq_printf(pi, "%px  %s  %pS",
			addr, kprobe_type, p->addr);

	if (!pp)
		pp = p;
	/* Fix: terminate the record with '\n' (lost in this copy). */
	seq_printf(pi, "%s%s%s%s\n",
		(kprobe_gone(p) ? "[GONE]" : ""),
		((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
}
55479f647 kprobes: Allow pr... |
2184 |
/* seq_file .start: the iterator value is the hash-bucket index itself. */
static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}
55479f647 kprobes: Allow pr... |
2188 |
/* seq_file .next: advance to the next hash bucket, NULL past the end. */
static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	return (++*pos >= KPROBE_TABLE_SIZE) ? NULL : pos;
}
55479f647 kprobes: Allow pr... |
2195 |
/*
 * seq_file .stop callback (also reused by the blacklist seq_ops).
 * The iterator holds no state or locks, so there is nothing to release.
 */
static void kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}
55479f647 kprobes: Allow pr... |
2199 |
static int show_kprobe_addr(struct seq_file *pi, void *v) |
346fd59ba [PATCH] kprobes: ... |
2200 2201 |
{ struct hlist_head *head; |
346fd59ba [PATCH] kprobes: ... |
2202 2203 2204 |
struct kprobe *p, *kp; const char *sym = NULL; unsigned int i = *(loff_t *) v; |
ffb451227 Simplify kallsyms... |
2205 |
unsigned long offset = 0; |
ab7678656 kprobes: use KSYM... |
2206 |
char *modname, namebuf[KSYM_NAME_LEN]; |
346fd59ba [PATCH] kprobes: ... |
2207 2208 2209 |
head = &kprobe_table[i]; preempt_disable(); |
b67bfe0d4 hlist: drop the n... |
2210 |
hlist_for_each_entry_rcu(p, head, hlist) { |
ffb451227 Simplify kallsyms... |
2211 |
sym = kallsyms_lookup((unsigned long)p->addr, NULL, |
346fd59ba [PATCH] kprobes: ... |
2212 |
&offset, &modname, namebuf); |
afd66255b kprobes: Introduc... |
2213 |
if (kprobe_aggrprobe(p)) { |
346fd59ba [PATCH] kprobes: ... |
2214 |
list_for_each_entry_rcu(kp, &p->list, list) |
afd66255b kprobes: Introduc... |
2215 |
report_probe(pi, kp, sym, offset, modname, p); |
346fd59ba [PATCH] kprobes: ... |
2216 |
} else |
afd66255b kprobes: Introduc... |
2217 |
report_probe(pi, p, sym, offset, modname, NULL); |
346fd59ba [PATCH] kprobes: ... |
2218 2219 2220 2221 |
} preempt_enable(); return 0; } |
88e9d34c7 seq_file: constif... |
2222 |
/* seq_file iterator backing the debugfs "kprobes/list" file. */
static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};
55479f647 kprobes: Allow pr... |
2228 |
/* open() for debugfs "kprobes/list": attach the probe-table iterator. */
static int kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}
828c09509 const: constify r... |
2232 |
/* File operations for debugfs "kprobes/list" (read-only seq_file). */
static const struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
637247403 kprobes: Show bla... |
2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 |
/* kprobes/blacklist -- shows which functions can not be probed */ static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos) { return seq_list_start(&kprobe_blacklist, *pos); } static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos) { return seq_list_next(v, &kprobe_blacklist, pos); } static int kprobe_blacklist_seq_show(struct seq_file *m, void *v) { struct kprobe_blacklist_entry *ent = list_entry(v, struct kprobe_blacklist_entry, list); |
ffb9bd68e kprobes: Show bla... |
2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 |
/* * If /proc/kallsyms is not showing kernel address, we won't * show them here either. */ if (!kallsyms_show_value()) seq_printf(m, "0x%px-0x%px\t%ps ", NULL, NULL, (void *)ent->start_addr); else seq_printf(m, "0x%px-0x%px\t%ps ", (void *)ent->start_addr, (void *)ent->end_addr, (void *)ent->start_addr); |
637247403 kprobes: Show bla... |
2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 |
return 0; } static const struct seq_operations kprobe_blacklist_seq_ops = { .start = kprobe_blacklist_seq_start, .next = kprobe_blacklist_seq_next, .stop = kprobe_seq_stop, /* Reuse void function */ .show = kprobe_blacklist_seq_show, }; static int kprobe_blacklist_open(struct inode *inode, struct file *filp) { return seq_open(filp, &kprobe_blacklist_seq_ops); } static const struct file_operations debugfs_kprobe_blacklist_ops = { .open = kprobe_blacklist_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; |
12310e343 kprobes: Propagat... |
2286 |
static int arm_all_kprobes(void) |
bf8f6e5b3 Kprobes: The ON/O... |
2287 2288 |
{ struct hlist_head *head; |
bf8f6e5b3 Kprobes: The ON/O... |
2289 |
struct kprobe *p; |
12310e343 kprobes: Propagat... |
2290 2291 |
unsigned int i, total = 0, errors = 0; int err, ret = 0; |
bf8f6e5b3 Kprobes: The ON/O... |
2292 2293 |
mutex_lock(&kprobe_mutex); |
e579abeb5 kprobes: rename k... |
2294 2295 |
/* If kprobes are armed, just return */ if (!kprobes_all_disarmed) |
bf8f6e5b3 Kprobes: The ON/O... |
2296 |
goto already_enabled; |
977ad481b kprobes: set kpro... |
2297 2298 2299 2300 2301 2302 |
/* * optimize_kprobe() called by arm_kprobe() checks * kprobes_all_disarmed, so set kprobes_all_disarmed before * arm_kprobe. */ kprobes_all_disarmed = false; |
afd66255b kprobes: Introduc... |
2303 |
/* Arming kprobes doesn't optimize kprobe itself */ |
bf8f6e5b3 Kprobes: The ON/O... |
2304 2305 |
for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; |
12310e343 kprobes: Propagat... |
2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 |
/* Arm all kprobes on a best-effort basis */ hlist_for_each_entry_rcu(p, head, hlist) { if (!kprobe_disabled(p)) { err = arm_kprobe(p); if (err) { errors++; ret = err; } total++; } } |
bf8f6e5b3 Kprobes: The ON/O... |
2317 |
} |
12310e343 kprobes: Propagat... |
2318 2319 2320 2321 2322 2323 2324 |
if (errors) pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes ", errors, total); else pr_info("Kprobes globally enabled "); |
bf8f6e5b3 Kprobes: The ON/O... |
2325 2326 2327 |
already_enabled: mutex_unlock(&kprobe_mutex); |
12310e343 kprobes: Propagat... |
2328 |
return ret; |
bf8f6e5b3 Kprobes: The ON/O... |
2329 |
} |
297f9233b kprobes: Propagat... |
2330 |
static int disarm_all_kprobes(void) |
bf8f6e5b3 Kprobes: The ON/O... |
2331 2332 |
{ struct hlist_head *head; |
bf8f6e5b3 Kprobes: The ON/O... |
2333 |
struct kprobe *p; |
297f9233b kprobes: Propagat... |
2334 2335 |
unsigned int i, total = 0, errors = 0; int err, ret = 0; |
bf8f6e5b3 Kprobes: The ON/O... |
2336 2337 |
mutex_lock(&kprobe_mutex); |
e579abeb5 kprobes: rename k... |
2338 |
/* If kprobes are already disarmed, just return */ |
6274de498 kprobes: Support ... |
2339 2340 |
if (kprobes_all_disarmed) { mutex_unlock(&kprobe_mutex); |
297f9233b kprobes: Propagat... |
2341 |
return 0; |
6274de498 kprobes: Support ... |
2342 |
} |
bf8f6e5b3 Kprobes: The ON/O... |
2343 |
|
e579abeb5 kprobes: rename k... |
2344 |
kprobes_all_disarmed = true; |
afd66255b kprobes: Introduc... |
2345 |
|
bf8f6e5b3 Kprobes: The ON/O... |
2346 2347 |
for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; |
297f9233b kprobes: Propagat... |
2348 |
/* Disarm all kprobes on a best-effort basis */ |
b67bfe0d4 hlist: drop the n... |
2349 |
hlist_for_each_entry_rcu(p, head, hlist) { |
297f9233b kprobes: Propagat... |
2350 2351 2352 2353 2354 2355 2356 2357 |
if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) { err = disarm_kprobe(p, false); if (err) { errors++; ret = err; } total++; } |
bf8f6e5b3 Kprobes: The ON/O... |
2358 2359 |
} } |
297f9233b kprobes: Propagat... |
2360 2361 2362 2363 2364 2365 2366 2367 |
if (errors) pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes ", errors, total); else pr_info("Kprobes globally disabled "); |
bf8f6e5b3 Kprobes: The ON/O... |
2368 |
mutex_unlock(&kprobe_mutex); |
bf8f6e5b3 Kprobes: The ON/O... |
2369 |
|
6274de498 kprobes: Support ... |
2370 2371 |
/* Wait for disarming all kprobes by optimizer */ wait_for_kprobe_optimizer(); |
297f9233b kprobes: Propagat... |
2372 2373 |
return ret; |
bf8f6e5b3 Kprobes: The ON/O... |
2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 |
} /* * XXX: The debugfs bool file interface doesn't allow for callbacks * when the bool state is switched. We can reuse that facility when * available */ static ssize_t read_enabled_file_bool(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { char buf[3]; |
e579abeb5 kprobes: rename k... |
2385 |
if (!kprobes_all_disarmed) |
bf8f6e5b3 Kprobes: The ON/O... |
2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 |
buf[0] = '1'; else buf[0] = '0'; buf[1] = ' '; buf[2] = 0x00; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static ssize_t write_enabled_file_bool(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { char buf[32]; |
efeb156e7 kprobes: silence ... |
2399 |
size_t buf_size; |
12310e343 kprobes: Propagat... |
2400 |
int ret = 0; |
bf8f6e5b3 Kprobes: The ON/O... |
2401 2402 2403 2404 |
buf_size = min(count, (sizeof(buf)-1)); if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT; |
10fb46d5f kprobes: handle e... |
2405 |
buf[buf_size] = '\0'; |
bf8f6e5b3 Kprobes: The ON/O... |
2406 2407 2408 2409 |
switch (buf[0]) { case 'y': case 'Y': case '1': |
12310e343 kprobes: Propagat... |
2410 |
ret = arm_all_kprobes(); |
bf8f6e5b3 Kprobes: The ON/O... |
2411 2412 2413 2414 |
break; case 'n': case 'N': case '0': |
297f9233b kprobes: Propagat... |
2415 |
ret = disarm_all_kprobes(); |
bf8f6e5b3 Kprobes: The ON/O... |
2416 |
break; |
10fb46d5f kprobes: handle e... |
2417 2418 |
default: return -EINVAL; |
bf8f6e5b3 Kprobes: The ON/O... |
2419 |
} |
12310e343 kprobes: Propagat... |
2420 2421 |
if (ret) return ret; |
bf8f6e5b3 Kprobes: The ON/O... |
2422 2423 |
return count; } |
828c09509 const: constify r... |
2424 |
/* File operations for debugfs "kprobes/enabled" (global on/off switch). */
static const struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
	.llseek =	default_llseek,
};
55479f647 kprobes: Allow pr... |
2429 |
static int __init debugfs_kprobe_init(void) |
346fd59ba [PATCH] kprobes: ... |
2430 2431 |
{ struct dentry *dir, *file; |
bf8f6e5b3 Kprobes: The ON/O... |
2432 |
unsigned int value = 1; |
346fd59ba [PATCH] kprobes: ... |
2433 2434 2435 2436 |
dir = debugfs_create_dir("kprobes", NULL); if (!dir) return -ENOMEM; |
f2a3ab360 kprobes: Make lis... |
2437 |
file = debugfs_create_file("list", 0400, dir, NULL, |
346fd59ba [PATCH] kprobes: ... |
2438 |
&debugfs_kprobes_operations); |
637247403 kprobes: Show bla... |
2439 2440 |
if (!file) goto error; |
346fd59ba [PATCH] kprobes: ... |
2441 |
|
bf8f6e5b3 Kprobes: The ON/O... |
2442 2443 |
file = debugfs_create_file("enabled", 0600, dir, &value, &fops_kp); |
637247403 kprobes: Show bla... |
2444 2445 |
if (!file) goto error; |
f2a3ab360 kprobes: Make lis... |
2446 |
file = debugfs_create_file("blacklist", 0400, dir, NULL, |
637247403 kprobes: Show bla... |
2447 2448 2449 |
&debugfs_kprobe_blacklist_ops); if (!file) goto error; |
bf8f6e5b3 Kprobes: The ON/O... |
2450 |
|
346fd59ba [PATCH] kprobes: ... |
2451 |
return 0; |
637247403 kprobes: Show bla... |
2452 2453 2454 2455 |
error: debugfs_remove(dir); return -ENOMEM; |
346fd59ba [PATCH] kprobes: ... |
2456 2457 2458 2459 2460 2461 |
} late_initcall(debugfs_kprobe_init); #endif /* CONFIG_DEBUG_FS */ module_init(init_kprobes); |