/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>

#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>
#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
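/*
 * Illustrative sketch (not part of the original source): on architectures
 * without function descriptors, the macro above reduces to a plain kallsyms
 * lookup, so resolving a probe target looks like:
 *
 *	kprobe_opcode_t *addr;
 *	kprobe_lookup_name("vfs_read", addr);	// NULL if the symbol is absent
 */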
static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

/* This protects kprobe_table and optimizing_list */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	raw_spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/* Blacklist -- list of struct kprobe_blacklist_entry */
static LIST_HEAD(kprobe_blacklist);
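/*
 * Geometry note (added for clarity): KPROBE_HASH_BITS == 6, so both hash
 * tables above have 64 buckets. A kprobe lands in bucket
 * hash_ptr(p->addr, KPROBE_HASH_BITS); kretprobe instances are hashed by
 * their owning task instead.
 */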

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	struct kprobe_insn_cache *cache;
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static void *alloc_insn_page(void)
{
	return module_alloc(PAGE_SIZE);
}

static void free_insn_page(void *page)
{
	module_memfree(page);
}

struct kprobe_insn_cache kprobe_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int collect_garbage_slots(struct kprobe_insn_cache *c);
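/*
 * Worked example (added for clarity; numbers assume x86-64, where
 * sizeof(kprobe_opcode_t) == 1 and MAX_INSN_SIZE == 16): one 4096-byte
 * page yields PAGE_SIZE / (16 * 1) == 256 slots, so slot_used[] costs
 * 256 bytes of bookkeeping per page.
 */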

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;
	kprobe_opcode_t *slot = NULL;

	mutex_lock(&c->mutex);
 retry:
	list_for_each_entry(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					slot = kip->insns + (i * c->insn_size);
					goto out;
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		goto out;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = c->alloc();
	if (!kip->insns) {
		kfree(kip);
		goto out;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	kip->cache = c;
	list_add(&kip->list, &c->pages);
	slot = kip->insns;
out:
	mutex_unlock(&c->mutex);
	return slot;
}
/* Return 1 if all garbage is collected, otherwise 0. */
static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del(&kip->list);
			kip->cache->free(kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}
static int collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no-one is interrupted on the garbage slots */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}
void __free_insn_slot(struct kprobe_insn_cache *c,
		      kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	mutex_lock(&c->mutex);
	list_for_each_entry(kip, &c->pages, list) {
		long idx = ((long)slot - (long)kip->insns) /
				(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c)) {
			WARN_ON(kip->slot_used[idx] != SLOT_USED);
			if (dirty) {
				kip->slot_used[idx] = SLOT_DIRTY;
				kip->ngarbage++;
				if (++c->nr_garbage > slots_per_page(c))
					collect_garbage_slots(c);
			} else
				collect_one_slot(kip, idx);
			goto out;
		}
	}
	/* Could not free this slot. */
	WARN_ON(1);
out:
	mutex_unlock(&c->mutex);
}
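/*
 * Lifecycle sketch (added for clarity; the real consumers are the arch
 * backends via the get_insn_slot()/free_insn_slot() wrappers):
 *
 *	kprobe_opcode_t *buf = get_insn_slot();	// carve out a slot
 *	...copy the probed instruction into buf, single-step from it...
 *	free_insn_slot(buf, 1);	// 1 == dirty: reuse is deferred until
 *				// after synchronize_sched() in the collector
 */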

#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
#endif
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__this_cpu_write(kprobe_instance, kp);
}

static inline void reset_kprobe_instance(void)
{
	__this_cpu_write(kprobe_instance, NULL);
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, head, hlist) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}
NOKPROBE_SYMBOL(get_kprobe);
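/*
 * Lookup sketch (added for clarity; the address arithmetic shown follows
 * the x86 convention and is illustrative only): an arch breakpoint handler
 * resolves the trapping address back to its probe with
 *
 *	p = get_kprobe((void *)(regs->ip - sizeof(kprobe_opcode_t)));
 *
 * subject to the locking rules in the comment above.
 */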

static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/* Return true(!0) if the kprobe is unused */
static inline int kprobe_unused(struct kprobe *p)
{
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;

/*
 * Call all pre_handlers on the list, but ignore their return values.
 * This must be called from the arch-dep optimized caller.
 */
void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}
NOKPROBE_SYMBOL(opt_pre_handler);

/* Free optimized instructions and optimized_kprobe */
static void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	arch_remove_kprobe(p);
	kfree(op);
}

/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}
/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
static inline int kprobe_disarmed(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* If kprobe is not aggr/opt probe, just return kprobe is disabled */
	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	op = container_of(p, struct optimized_kprobe, kp);

	return kprobe_disabled(p) && list_empty(&op->list);
}

/* Return true(!0) if the probe is queued on (un)optimizing lists */
static int kprobe_queued(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			return 1;
	}
	return 0;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (exclude breakpoint).
 */
static struct kprobe *get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}

/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5
/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * optimizing_list.
 */
static void do_optimize_kprobes(void)
{
	/* Optimization is never done while disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	/*
	 * The optimization/unoptimization refers to online_cpus via
	 * stop_machine(), and cpu-hotplug modifies online_cpus. At the
	 * same time, text_mutex will be held both in cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug tries to lock
	 * text_mutex but stop_machine() can not be done because online_cpus
	 * has been changed).
	 * To avoid this deadlock, we need to call get_online_cpus()
	 * to prevent cpu-hotplug outside of text_mutex locking.
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);
	arch_optimize_kprobes(&optimizing_list);
	mutex_unlock(&text_mutex);
	put_online_cpus();
}
/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if needed) kprobes listed on unoptimizing_list.
 */
static void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	/* Unoptimization must be done any time */
	if (list_empty(&unoptimizing_list))
		return;

	/* Ditto to do_optimize_kprobes */
	get_online_cpus();
	mutex_lock(&text_mutex);
	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
	/* Loop over freeing_list for disarming */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Disarm probes if marked disabled */
		if (kprobe_disabled(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from hash list. After waiting
			 * for synchronization, these probes are reclaimed.
			 * (reclaiming is done by do_free_cleaned_kprobes.)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
	mutex_unlock(&text_mutex);
	put_online_cpus();
}

/* Reclaim all kprobes on the freeing_list */
static void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		BUG_ON(!kprobe_unused(&op->kp));
		list_del_init(&op->list);
		free_aggr_kprobe(&op->kp);
	}
}

/* Start the optimizer after OPTIMIZE_DELAY has passed */
static void kick_kprobe_optimizer(void)
{
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}
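/*
 * Batching note (added for clarity): OPTIMIZE_DELAY is in jiffies, so the
 * delayed work above coalesces bursts of (un)registrations into a single
 * text-patching pass roughly five ticks later, instead of patching the
 * kernel image once per probe.
 */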
/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for the quiescence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for the quiescence period to ensure all running
	 * interrupts are done. Because an optprobe may modify multiple
	 * instructions, there is a chance that the Nth instruction is
	 * interrupted. In that case, a running interrupt can return to the
	 * 2nd-Nth byte of the jump instruction. This wait avoids that.
	 */
	synchronize_sched();

	/* Step 3: Optimize kprobes after the quiescence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after the quiescence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&module_mutex);
	mutex_unlock(&kprobe_mutex);

	/* Step 5: Kick the optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();
}

/* Wait for completing optimization and unoptimization */
static void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* this will also make optimizing_work execute immediately */
		flush_delayed_work(&optimizing_work);
		/* @optimizing_work might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}

/* Optimize kprobe if p is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* Both of break_handler and post_handler are not supported. */
	if (p->break_handler || p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check that there are no other kprobes at the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
		return;
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	if (!list_empty(&op->list))
		/* This is under unoptimizing. Just dequeue the probe */
		list_del_init(&op->list);
	else {
		list_add(&op->list, &optimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Short cut to direct unoptimizing */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	get_online_cpus();
	arch_unoptimize_kprobe(op);
	put_online_cpus();
	if (kprobe_disabled(&op->kp))
		arch_disarm_kprobe(&op->kp);
}

/* Unoptimize a kprobe if p is optimized */
static void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is not an optprobe nor optimized */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p)) {
		/* Unoptimized or unoptimizing case */
		if (force && !list_empty(&op->list)) {
			/*
			 * Only if this is an unoptimizing kprobe and forced,
			 * forcibly unoptimize it. (No need to unoptimize an
			 * unoptimized kprobe again :)
			 */
			list_del_init(&op->list);
			force_unoptimize_kprobe(op);
		}
		return;
	}

	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	if (!list_empty(&op->list)) {
		/* Dequeue from the optimization queue */
		list_del_init(&op->list);
		return;
	}
	/* Optimized kprobe case */
	if (force)
		/* Forcibly update the code: this is a special case */
		force_unoptimize_kprobe(op);
	else {
		list_add(&op->list, &unoptimizing_list);
		kick_kprobe_optimizer();
	}
}
/* Cancel unoptimizing for reusing */
static void reuse_unused_kprobe(struct kprobe *ap)
{
	struct optimized_kprobe *op;

	BUG_ON(!kprobe_unused(ap));
	/*
	 * Unused kprobe MUST be on the way of delayed unoptimizing (means
	 * there is still a relative jump) and disabled.
	 */
	op = container_of(ap, struct optimized_kprobe, kp);
	if (unlikely(list_empty(&op->list)))
		printk(KERN_WARNING "Warning: found a stray unused "
			"aggrprobe@%p\n", ap->addr);
	/* Enable the probe again */
	ap->flags &= ~KPROBE_FLAG_DISABLED;
	/* Optimize it again (remove from op->list) */
	BUG_ON(!kprobe_optready(ap));
	optimize_kprobe(ap);
}
/* Remove optimized instructions */
static void kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list))
		/* Dequeue from the (un)optimization queue */
		list_del_init(&op->list);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;

	if (kprobe_unused(p)) {
		/* Enqueue if it is unused */
		list_add(&op->list, &freeing_list);
		/*
		 * Remove unused probes from the hash list. After waiting
		 * for synchronization, this probe is reclaimed.
		 * (reclaiming is done by do_free_cleaned_kprobes().)
		 */
		hlist_del_rcu(&op->kp.hlist);
	}

	/* Don't touch the code, because it is already freed. */
	arch_remove_optimized_kprobe(op);
}

/* Try to prepare optimized instructions */
static void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_prepare_optimized_kprobe(op, p);
}
/* Allocate a new optimized_kprobe and try to prepare optimized instructions */
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	arch_prepare_optimized_kprobe(op, p);

	return &op->kp;
}

static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it.
 * NOTE: p must be a normal registered kprobe.
 */
static void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	/* Impossible to optimize an ftrace-based kprobe */
	if (kprobe_ftrace(p))
		return;

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	mutex_lock(&text_mutex);

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		goto out;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If we failed to set up optimizing, fall back to kprobe */
		arch_remove_optimized_kprobe(op);
		kfree(op);
		goto out;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);	/* This just kicks the optimizer thread */

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
}
#ifdef CONFIG_SYSCTL
static void optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		goto out;

	kprobes_allow_optimization = true;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	printk(KERN_INFO "Kprobes globally optimized\n");
out:
	mutex_unlock(&kprobe_mutex);
}
static void unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	kprobes_allow_optimization = false;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p, false);
		}
	}
	mutex_unlock(&kprobe_mutex);

	/* Wait for unoptimizing completion */
	wait_for_kprobe_optimizer();
	printk(KERN_INFO "Kprobes globally unoptimized\n");
}
static DEFINE_MUTEX(kprobe_sysctl_mutex);
int sysctl_kprobes_optimization;
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void __user *buffer, size_t *length,
				      loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_sysctl_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_sysctl_mutex);

	return ret;
}
#endif /* CONFIG_SYSCTL */
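/*
 * Usage sketch (added for clarity): the handler above backs the
 * /proc/sys/debug/kprobes-optimization knob, so e.g.
 *
 *	# echo 0 > /proc/sys/debug/kprobes-optimization
 *
 * forcibly turns every optimized jump back into a breakpoint.
 */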
/* Put a breakpoint for a probe. Must be called with text_mutex locked */
static void __arm_kprobe(struct kprobe *p)
{
	struct kprobe *_p;

	/* Check collision with other optimized kprobes */
	_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(_p))
		/* Fallback to unoptimized kprobe */
		unoptimize_kprobe(_p, true);

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}

/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
static void __disarm_kprobe(struct kprobe *p, bool reopt)
{
	struct kprobe *_p;

	/* Try to unoptimize */
	unoptimize_kprobe(p, kprobes_all_disarmed);

	if (!kprobe_queued(p)) {
		arch_disarm_kprobe(p);
		/* If another kprobe was blocked, optimize it. */
		_p = get_optimized_kprobe((unsigned long)p->addr);
		if (unlikely(_p) && reopt)
			optimize_kprobe(_p);
	}
	/* TODO: reoptimize others after unoptimizing this probe */
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p, f)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
#define kprobe_disarmed(p)			kprobe_disabled(p)
#define wait_for_kprobe_optimizer()		do {} while (0)

/* There should be no unused kprobes that can be reused without optimization */
static void reuse_unused_kprobe(struct kprobe *ap)
{
	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
	BUG_ON(kprobe_unused(ap));
}

static void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */
#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};
static int kprobe_ftrace_enabled;

/* Must ensure p->addr is really on ftrace */
static int prepare_kprobe(struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		return arch_prepare_kprobe(p);

	return arch_prepare_kprobe_ftrace(p);
}

/* Caller must lock kprobe_mutex */
static void arm_kprobe_ftrace(struct kprobe *p)
{
	int ret;

	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 0, 0);
	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
	kprobe_ftrace_enabled++;
	if (kprobe_ftrace_enabled == 1) {
		ret = register_ftrace_function(&kprobe_ftrace_ops);
		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
	}
}

/* Caller must lock kprobe_mutex */
static void disarm_kprobe_ftrace(struct kprobe *p)
{
	int ret;

	kprobe_ftrace_enabled--;
	if (kprobe_ftrace_enabled == 0) {
		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
	}
	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 1, 0);
	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n",
	     p->addr, ret);
}
#else	/* !CONFIG_KPROBES_ON_FTRACE */
#define prepare_kprobe(p)	arch_prepare_kprobe(p)
#define arm_kprobe_ftrace(p)	do {} while (0)
#define disarm_kprobe_ftrace(p)	do {} while (0)
#endif
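/*
 * Behavior note (added for clarity): with CONFIG_KPROBES_ON_FTRACE, a probe
 * whose address is exactly an ftrace call site is dispatched through the
 * shared kprobe_ftrace_ops above rather than via a breakpoint; the first
 * such probe registers the ftrace_ops and the last one unregisters it.
 */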
/* Arm a kprobe with text_mutex */
static void arm_kprobe(struct kprobe *kp)
{
	if (unlikely(kprobe_ftrace(kp))) {
		arm_kprobe_ftrace(kp);
		return;
	}
	/*
	 * Here, since __arm_kprobe() doesn't use stop_machine(),
	 * this doesn't cause deadlock on text_mutex. So, we don't
	 * need get_online_cpus().
	 */
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void disarm_kprobe(struct kprobe *kp, bool reopt)
{
	if (unlikely(kprobe_ftrace(kp))) {
		disarm_kprobe_ftrace(kp);
		return;
	}
	/* Ditto */
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp, reopt);
	mutex_unlock(&text_mutex);
}
/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}
NOKPROBE_SYMBOL(aggr_pre_handler);

static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
			      unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}
NOKPROBE_SYMBOL(aggr_post_handler);

static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
			      int trapnr)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(aggr_fault_handler);

static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}
NOKPROBE_SYMBOL(aggr_break_handler);
/* Walks the list and increments the nmissed count for multiprobe case */
void kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}
NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
void recycle_rp_inst(struct kretprobe_instance *ri,
		     struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		raw_spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		raw_spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}
NOKPROBE_SYMBOL(recycle_rp_inst);
void kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
__acquires(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_hash_lock);

static void kretprobe_table_lock(unsigned long hash,
				 unsigned long *flags)
__acquires(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_table_lock);

void kretprobe_hash_unlock(struct task_struct *tsk,
			   unsigned long *flags)
__releases(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_hash_unlock);

static void kretprobe_table_unlock(unsigned long hash,
				   unsigned long *flags)
__releases(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_table_unlock);
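/*
 * Pairing sketch (added for clarity): arch kretprobe trampolines bracket
 * their walk of the per-task instance list like so:
 *
 *	kretprobe_hash_lock(current, &head, &flags);
 *	...consume kretprobe_instances for current from *head...
 *	kretprobe_hash_unlock(current, &flags);
 */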

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
NOKPROBE_SYMBOL(kprobe_flush_task);
static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *next;

	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}
NOKPROBE_SYMBOL(cleanup_rp_inst);
/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));

	if (p->break_handler || p->post_handler)
		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */

	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	return 0;
}
/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about the kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = orig_p;

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	/*
	 * Get online CPUs to avoid text_mutex deadlock with stop_machine,
	 * which is invoked by unoptimize_kprobe() in add_new_kprobe().
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);

	if (!kprobe_aggrprobe(orig_p)) {
		/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
		ap = alloc_aggr_kprobe(orig_p);
		if (!ap) {
			ret = -ENOMEM;
			goto out;
		}
		init_aggr_kprobe(ap, orig_p);
	} else if (kprobe_unused(ap))
		/* This probe is going to die. Rescue it */
		reuse_unused_kprobe(ap);

	if (kprobe_gone(ap)) {
		/*
		 * Attempting to insert a new probe at the same location
		 * that had a probe in the module vaddr area which was
		 * already freed. So, the instruction slot has already been
		 * released. We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_probe. It will be used next
			 * time, or freed by unregister_kprobe().
			 */
			goto out;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear gone flag to prevent allocating new slot again, and
		 * set disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy ap's insn slot to p */
	copy_kprobe(ap, p);
	ret = add_new_kprobe(ap, p);

out:
	mutex_unlock(&text_mutex);
	put_online_cpus();
	jump_label_unlock();

	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arm_kprobe(ap);
	}
	return ret;
}
bool __weak arch_within_kprobe_blacklist(unsigned long addr)
{
	/* The __kprobes marked functions and entry code must not be probed */
	return addr >= (unsigned long)__kprobes_text_start &&
	       addr < (unsigned long)__kprobes_text_end;
}

bool within_kprobe_blacklist(unsigned long addr)
{
	struct kprobe_blacklist_entry *ent;

	if (arch_within_kprobe_blacklist(addr))
		return true;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	list_for_each_entry(ent, &kprobe_blacklist, list) {
		if (addr >= ent->start_addr && addr < ent->end_addr)
			return true;
	}

	return false;
}
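/*
 * Sketch (added for clarity): functions that must never be probed either
 * live in the __kprobes text section or are listed via NOKPROBE_SYMBOL(),
 * whose entries populate kprobe_blacklist at init, e.g.
 *
 *	static void critical_path(void) { ... }	// hypothetical
 *	NOKPROBE_SYMBOL(critical_path);
 */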
/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 * This returns encoded errors if it fails to look up the symbol or if an
 * invalid combination of parameters is passed.
 */
static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;

	if ((p->symbol_name && p->addr) ||
	    (!p->symbol_name && !p->addr))
		goto invalid;

	if (p->symbol_name) {
		kprobe_lookup_name(p->symbol_name, addr);
		if (!addr)
			return ERR_PTR(-ENOENT);
	}

	addr = (kprobe_opcode_t *)(((char *)addr) + p->offset);
	if (addr)
		return addr;

invalid:
	return ERR_PTR(-EINVAL);
}
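/*
 * Resolution sketch (added for clarity): exactly one of ->addr and
 * ->symbol_name may be set, and ->offset is applied after the lookup, e.g.
 *
 *	struct kprobe kp = { .symbol_name = "vfs_read", .offset = 0x10 };
 *	// kprobe_addr(&kp) == address of vfs_read + 0x10, or an ERR_PTR
 */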
/* Check passed kprobe is valid and return kprobe in kprobe_table. */
static struct kprobe *__get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	ap = get_kprobe(p->addr);
	if (unlikely(!ap))
		return NULL;

	if (p != ap) {
		list_for_each_entry_rcu(list_p, &ap->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return ap;
}

/* Return error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);
	if (__get_valid_kprobe(p))
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);

	return ret;
}
int __weak arch_check_ftrace_location(struct kprobe *p)
{
	unsigned long ftrace_addr;

	ftrace_addr = ftrace_location((unsigned long)p->addr);
	if (ftrace_addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		/* Given address is not on the instruction boundary */
		if ((unsigned long)p->addr != ftrace_addr)
			return -EILSEQ;
		p->flags |= KPROBE_FLAG_FTRACE;
#else	/* !CONFIG_KPROBES_ON_FTRACE */
		return -EINVAL;
#endif
	}
	return 0;
}

static int check_kprobe_address_safe(struct kprobe *p,
				     struct module **probed_mod)
{
	int ret;

	ret = arch_check_ftrace_location(p);
	if (ret)
		return ret;
	jump_label_lock();
	preempt_disable();

	/* Ensure it is not in reserved area nor out of text */
	if (!kernel_text_address((unsigned long) p->addr) ||
	    within_kprobe_blacklist((unsigned long) p->addr) ||
	    jump_label_text_reserved(p->addr, p->addr)) {
		ret = -EINVAL;
		goto out;
	}

	/* Check if we are probing a module */
	*probed_mod = __module_text_address((unsigned long) p->addr);
	if (*probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(*probed_mod))) {
			ret = -ENOENT;
			goto out;
		}

		/*
		 * If the module freed .init.text, we couldn't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
		    (*probed_mod)->state != MODULE_STATE_COMING) {
			module_put(*probed_mod);
			*probed_mod = NULL;
			ret = -ENOENT;
		}
	}
out:
	preempt_enable();
	jump_label_unlock();

	return ret;
}
int register_kprobe(struct kprobe *p)
{
	int ret;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	/* Adjust probe address from symbol */
	addr = kprobe_addr(p);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;
	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);

	ret = check_kprobe_address_safe(p, &probed_mod);
	if (ret)
		return ret;

	mutex_lock(&kprobe_mutex);

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize old_p, locking text_mutex. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	mutex_lock(&text_mutex);	/* Avoiding text modification */
	ret = prepare_kprobe(p);
	mutex_unlock(&text_mutex);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arm_kprobe(p);

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
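/*
 * Registration sketch (added for clarity; illustrative module code with a
 * hypothetical handler named my_pre, see samples/kprobes/ for complete
 * examples):
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("pre-handler hit at %p\n", p->addr);
 *		return 0;	// 0 == let the probed instruction run
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "vfs_read",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	ret = register_kprobe(&my_kp);	// 0 on success, -errno on failure
 */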

/* Check if all probes on the aggrprobe are disabled */
static int aggr_kprobe_disabled(struct kprobe *ap)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &ap->list, list)
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this ap.
			 */
			return 0;

	return 1;
}

/* Disable one kprobe: Must be called with kprobe_mutex held */
static struct kprobe *__disable_kprobe(struct kprobe *p)
{
	struct kprobe *orig_p;

	/* Get an original kprobe for return */
	orig_p = __get_valid_kprobe(p);
	if (unlikely(orig_p == NULL))
		return NULL;

	if (!kprobe_disabled(p)) {
		/* Disable probe if it is a child probe */
		if (p != orig_p)
			p->flags |= KPROBE_FLAG_DISABLED;

		/* Try to disarm and disable this/parent probe */
		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
			/*
			 * If kprobes_all_disarmed is set, orig_p
			 * should have already been disarmed, so
			 * skip the unneeded disarming process.
			 */
			if (!kprobes_all_disarmed)
				disarm_kprobe(orig_p, true);
			orig_p->flags |= KPROBE_FLAG_DISABLED;
		}
	}

	return orig_p;
}

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	/* Disable kprobe. This will disarm it if needed. */
	ap = __disable_kprobe(p);
	if (ap == NULL)
		return -EINVAL;

	if (ap == p)
		/*
		 * This probe is an independent (and non-optimized) kprobe
		 * (not an aggrprobe). Remove it from the hash list.
		 */
		goto disarmed;

	/* The following process expects this probe to be an aggrprobe */
	WARN_ON(!kprobe_aggrprobe(ap));

	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
		/*
		 * !disarmed can happen if the probe is under delayed
		 * unoptimizing.
		 */
		goto disarmed;
	else {
		/* If the disabled probe has special handlers, update the aggrprobe */
		if (p->break_handler && !kprobe_gone(p))
			ap->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &ap->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			ap->post_handler = NULL;
		}
noclean:
		/*
		 * Remove from the aggrprobe: this path will do nothing in
		 * __unregister_kprobe_bottom().
		 */
		list_del_rcu(&p->list);
		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
			/*
			 * Try to optimize this probe again, because the post
			 * handler may have been changed.
			 */
			optimize_kprobe(ap);
	}
	return 0;

disarmed:
	BUG_ON(!kprobe_disarmed(ap));
	hlist_del_rcu(&ap->hlist);
	return 0;
}

static void __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *ap;

	if (list_empty(&p->list))
		/* This is an independent kprobe */
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* This is the last child of an aggrprobe */
		ap = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		free_aggr_kprobe(ap);
	}
	/* Otherwise, do nothing. */
}

int register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);
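
/*
 * Usage sketch (illustrative only): batch registration rolls back any
 * probes it already registered when one fails, so the caller either gets
 * all probes armed or none. kp_a/kp_b are assumed, pre-initialized kprobes.
 */
#if 0	/* example only */
static struct kprobe *example_kps[] = { &kp_a, &kp_b };

	if (register_kprobes(example_kps, ARRAY_SIZE(example_kps)) < 0)
		pr_err("no probes registered\n");
	/* ... */
	unregister_kprobes(example_kps, ARRAY_SIZE(example_kps));
#endif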

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr, offset;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		/* Verify probepoint is a function entry point */
		if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
		    offset == 0) {
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		} else
			ret = -EINVAL;

		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);
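
/*
 * Usage sketch (illustrative only): a jprobe entry mirrors the probed
 * function's signature and must end with jprobe_return(). The do_fork
 * signature below matches kernels of this vintage but is an assumption
 * for the example.
 */
#if 0	/* example only */
static long example_jdo_fork(unsigned long clone_flags,
			     unsigned long stack_start,
			     unsigned long stack_size,
			     int __user *parent_tidptr,
			     int __user *child_tidptr)
{
	pr_info("jprobe: clone_flags = 0x%lx\n", clone_flags);
	jprobe_return();	/* mandatory: hands control back to do_fork */
	return 0;		/* never reached */
}

static struct jprobe example_jp = {
	.entry	= example_jdo_fork,
	.kp = {
		.symbol_name	= "do_fork",	/* assumed symbol */
	},
};

/* register_jprobe(&example_jp) in module init, unregister_jprobe() on exit */
#endif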

void unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it sets up the return probe.
 */
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/*
	 * To avoid deadlocks, prohibit return probing in NMI contexts,
	 * just skip the probe and increase the (inexact) 'nmissed'
	 * statistical counter, so that the user is informed that
	 * something happened:
	 */
	if (unlikely(in_nmi())) {
		rp->nmissed++;
		return 0;
	}

	/* TODO: consider to only swap the RA after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	raw_spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		raw_spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
			raw_spin_lock_irqsave(&rp->lock, flags);
			hlist_add_head(&ri->hlist, &rp->free_instances);
			raw_spin_unlock_irqrestore(&rp->lock, flags);
			return 0;
		}

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		raw_spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);

int register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (IS_ERR(addr))
			return PTR_ERR(addr);

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	raw_spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
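
/*
 * Usage sketch (illustrative only): a kretprobe that reports a function's
 * return value. regs_return_value() extracts the arch-specific return
 * register; the symbol name and example_* identifiers are assumptions.
 */
#if 0	/* example only */
static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	pr_info("%s returned %ld\n", ri->rp->kp.symbol_name,
		(long)regs_return_value(regs));
	return 0;
}

static struct kretprobe example_krp = {
	.kp.symbol_name	= "do_fork",	/* assumed symbol */
	.handler	= example_ret_handler,
	.maxactive	= 20,	/* instances kept for concurrent/preempted calls */
};

/* register_kretprobe(&example_krp) in module init */
#endif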

int register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (kprobe_aggrprobe(p)) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
		kill_optimized_kprobe(p);
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

/* Disable one kprobe */
int disable_kprobe(struct kprobe *kp)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);

	/* Disable this kprobe */
	if (__disable_kprobe(kp) == NULL)
		ret = -EINVAL;

	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone, we couldn't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;

	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
		p->flags &= ~KPROBE_FLAG_DISABLED;
		arm_kprobe(p);
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
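
/*
 * Usage sketch (illustrative only): muting and re-arming a registered
 * probe without tearing it down; example_kp is an assumed, already
 * registered kprobe.
 */
#if 0	/* example only */
	disable_kprobe(&example_kp);	/* handlers stop firing, probe stays registered */
	/* ... */
	enable_kprobe(&example_kp);	/* fails with -EINVAL if the probe is gone */
#endif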

void dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}
NOKPROBE_SYMBOL(dump_kprobe);

/*
 * Lookup and populate the kprobe_blacklist.
 *
 * Unlike the kretprobe blacklist, we'll need to determine
 * the range of addresses that belong to the said functions,
 * since a kprobe need not necessarily be at the beginning
 * of a function.
 */
static int __init populate_kprobe_blacklist(unsigned long *start,
					     unsigned long *end)
{
	unsigned long *iter;
	struct kprobe_blacklist_entry *ent;
	unsigned long entry, offset = 0, size = 0;

	for (iter = start; iter < end; iter++) {
		entry = arch_deref_entry_point((void *)*iter);

		if (!kernel_text_address(entry) ||
		    !kallsyms_lookup_size_offset(entry, &size, &offset)) {
			pr_err("Failed to find blacklist at %p\n",
				(void *)entry);
			continue;
		}

		ent = kmalloc(sizeof(*ent), GFP_KERNEL);
		if (!ent)
			return -ENOMEM;
		ent->start_addr = entry;
		ent->end_addr = entry + size;
		INIT_LIST_HEAD(&ent->list);
		list_add_tail(&ent->list, &kprobe_blacklist);
	}
	return 0;
}
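
/*
 * Sketch (illustrative only): how entries end up in the _kprobe_blacklist
 * section that populate_kprobe_blacklist() walks. Tagging a function with
 * NOKPROBE_SYMBOL() records its entry address there, and the lookup above
 * expands it to the function's full address range:
 */
#if 0	/* example only */
static int critical_helper(void)
{
	/* e.g. code used by kprobe handlers themselves; must not be probed */
	return 0;
}
NOKPROBE_SYMBOL(critical_helper);
#endif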

/* Module notifier call back, checking kprobes on the module */
static int kprobes_module_callback(struct notifier_block *nb,
				   unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING was notified, both of module .text and
	 * .init.text sections would be freed. When MODULE_STATE_LIVE was
	 * notified, only .init.text section would be freed. We need to
	 * disable kprobes which have been inserted in the sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr at which this probe is installed
				 * will soon be vfreed, but not synced to disk.
				 * Hence, disarming the breakpoint isn't needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

/* Markers of _kprobe_blacklist section */
extern unsigned long __start_kprobe_blacklist[];
extern unsigned long __stop_kprobe_blacklist[];

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	err = populate_kprobe_blacklist(__start_kprobe_blacklist,
					__stop_kprobe_blacklist);
	if (err) {
		pr_err("kprobes: failed to populate blacklist: %d\n", err);
		pr_err("Please take care of using kprobes.\n");
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

#if defined(CONFIG_OPTPROBES)
#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
	/* Init kprobe_optinsn_slots */
	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
#endif
	/* By default, kprobes can be optimized */
	kprobes_allow_optimization = true;
#endif

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname, struct kprobe *pp)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";

	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s ",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "));
	else
		seq_printf(pi, "%p  %s  %p ",
			p->addr, kprobe_type, p->addr);

	if (!pp)
		pp = p;
	seq_printf(pi, "%s%s%s%s\n",
		(kprobe_gone(p) ? "[GONE]" : ""),
		((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
}

static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[KSYM_NAME_LEN];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (kprobe_aggrprobe(p)) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname, p);
		} else
			report_probe(pi, p, sym, offset, modname, NULL);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/* kprobes/blacklist -- shows which functions can not be probed */
static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
{
	return seq_list_start(&kprobe_blacklist, *pos);
}

static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &kprobe_blacklist, pos);
}

static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
{
	struct kprobe_blacklist_entry *ent =
		list_entry(v, struct kprobe_blacklist_entry, list);

	seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr,
		   (void *)ent->end_addr, (void *)ent->start_addr);
	return 0;
}

static const struct seq_operations kprobe_blacklist_seq_ops = {
	.start = kprobe_blacklist_seq_start,
	.next  = kprobe_blacklist_seq_next,
	.stop  = kprobe_seq_stop,	/* Reuse void function */
	.show  = kprobe_blacklist_seq_show,
};

static int kprobe_blacklist_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobe_blacklist_seq_ops);
}

static const struct file_operations debugfs_kprobe_blacklist_ops = {
	.open           = kprobe_blacklist_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

static void arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	/*
	 * optimize_kprobe() called by arm_kprobe() checks
	 * kprobes_all_disarmed, so set kprobes_all_disarmed before
	 * arm_kprobe.
	 */
	kprobes_all_disarmed = false;
	/* Arming kprobes doesn't optimize kprobe itself */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				arm_kprobe(p);
	}

	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				disarm_kprobe(p, false);
		}
	}
	mutex_unlock(&kprobe_mutex);

	/* Wait for disarming all kprobes by optimizer */
	wait_for_kprobe_optimizer();
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	default:
		return -EINVAL;
	}

	return count;
}

static const struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
	.llseek =	default_llseek,
};
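
/*
 * From userspace, the "enabled" file is a global arm/disarm switch
 * (illustrative session, assuming debugfs is mounted in the usual place):
 *
 *	# cat /sys/kernel/debug/kprobes/enabled
 *	1
 *	# echo 0 > /sys/kernel/debug/kprobes/enabled	(disarm all kprobes)
 *	# echo 1 > /sys/kernel/debug/kprobes/enabled	(re-arm them)
 */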

static int __init debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file)
		goto error;

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file)
		goto error;

	file = debugfs_create_file("blacklist", 0444, dir, NULL,
				&debugfs_kprobe_blacklist_ops);
	if (!file)
		goto error;

	return 0;

error:
	debugfs_remove(dir);
	return -ENOMEM;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);