Commit de5bd88d5a5cce3cacea904d3503e5ebdb3852a2

Authored by Masami Hiramatsu
Committed by Linus Torvalds
1 parent e579abeb58

kprobes: support per-kprobe disabling

Add disable_kprobe() and enable_kprobe() to disable/enable kprobes
temporarily.

disable_kprobe() asynchronously disables the probe handlers of the specified
kprobe.  So, after calling it, some handlers may still be invoked for a while.
enable_kprobe() enables the specified kprobe.

aggr_pre_handler and aggr_post_handler check whether each probe is disabled.
On the other hand, aggr_break_handler and aggr_fault_handler don't check it,
because these handlers will be called while executing pre or post handlers,
which usually assist error handling.

Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 3 changed files with 191 additions and 33 deletions Side-by-side Diff

Documentation/kprobes.txt
... ... @@ -212,7 +212,9 @@
212 212 is single-stepped, Kprobe calls kp->post_handler. If a fault
213 213 occurs during execution of kp->pre_handler or kp->post_handler,
214 214 or during single-stepping of the probed instruction, Kprobes calls
215   -kp->fault_handler. Any or all handlers can be NULL.
  215 +kp->fault_handler. Any or all handlers can be NULL. If kp->flags
  216 +is set KPROBE_FLAG_DISABLED, that kp will be registered but disabled,
  217 +so, its handlers aren't hit until calling enable_kprobe(kp).
216 218  
217 219 NOTE:
218 220 1. With the introduction of the "symbol_name" field to struct kprobe,
... ... @@ -363,6 +365,22 @@
363 365 incorrect probes. However, other probes in the array are
364 366 unregistered correctly.
365 367  
  368 +4.7 disable_kprobe
  369 +
  370 +#include <linux/kprobes.h>
  371 +int disable_kprobe(struct kprobe *kp);
  372 +
  373 +Temporarily disables the specified kprobe. You can enable it again by using
  374 +enable_kprobe(). You must specify the kprobe which has been registered.
  375 +
  376 +4.8 enable_kprobe
  377 +
  378 +#include <linux/kprobes.h>
  379 +int enable_kprobe(struct kprobe *kp);
  380 +
  381 +Enables kprobe which has been disabled by disable_kprobe(). You must specify
  382 +the kprobe which has been registered.
  383 +
366 384 5. Kprobes Features and Limitations
367 385  
368 386 Kprobes allows multiple probes at the same address. Currently,
369 387  
370 388  
... ... @@ -500,11 +518,15 @@
500 518 is also specified. Following columns show probe status. If the probe is on
501 519 a virtual address that is no longer valid (module init sections, module
502 520 virtual addresses that correspond to modules that've been unloaded),
503   -such probes are marked with [GONE].
  521 +such probes are marked with [GONE]. If the probe is temporarily disabled,
  522 +such probes are marked with [DISABLED].
504 523  
505   -/debug/kprobes/enabled: Turn kprobes ON/OFF
  524 +/debug/kprobes/enabled: Turn kprobes ON/OFF forcibly.
506 525  
507   -Provides a knob to globally turn registered kprobes ON or OFF. By default,
508   -all kprobes are enabled. By echoing "0" to this file, all registered probes
509   -will be disarmed, till such time a "1" is echoed to this file.
  526 +Provides a knob to globally and forcibly turn registered kprobes ON or OFF.
  527 +By default, all kprobes are enabled. By echoing "0" to this file, all
  528 +registered probes will be disarmed, till such time a "1" is echoed to this
  529 +file. Note that this knob just disarms and arms all kprobes and doesn't
  530 +change each probe's disabling state. This means that disabled kprobes (marked
  531 +[DISABLED]) will not be enabled if you turn ON all kprobes by this knob.
include/linux/kprobes.h
... ... @@ -112,18 +112,28 @@
112 112 /* copy of the original instruction */
113 113 struct arch_specific_insn ainsn;
114 114  
115   - /* Indicates various status flags. Protected by kprobe_mutex. */
  115 + /*
  116 + * Indicates various status flags.
  117 + * Protected by kprobe_mutex after this kprobe is registered.
  118 + */
116 119 u32 flags;
117 120 };
118 121  
119 122 /* Kprobe status flags */
120 123 #define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */
  124 +#define KPROBE_FLAG_DISABLED 2 /* probe is temporarily disabled */
121 125  
  126 +/* Has this kprobe gone ? */
122 127 static inline int kprobe_gone(struct kprobe *p)
123 128 {
124 129 return p->flags & KPROBE_FLAG_GONE;
125 130 }
126 131  
  132 +/* Is this kprobe disabled ? */
  133 +static inline int kprobe_disabled(struct kprobe *p)
  134 +{
  135 + return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
  136 +}
127 137 /*
128 138 * Special probe type that uses setjmp-longjmp type tricks to resume
129 139 * execution at a specified entry with a matching prototype corresponding
... ... @@ -283,6 +293,9 @@
283 293 void kprobe_flush_task(struct task_struct *tk);
284 294 void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
285 295  
  296 +int disable_kprobe(struct kprobe *kp);
  297 +int enable_kprobe(struct kprobe *kp);
  298 +
286 299 #else /* !CONFIG_KPROBES: */
287 300  
288 301 static inline int kprobes_built_in(void)
... ... @@ -348,6 +361,14 @@
348 361 }
349 362 static inline void kprobe_flush_task(struct task_struct *tk)
350 363 {
  364 +}
  365 +static inline int disable_kprobe(struct kprobe *kp)
  366 +{
  367 + return -ENOSYS;
  368 +}
  369 +static inline int enable_kprobe(struct kprobe *kp)
  370 +{
  371 + return -ENOSYS;
351 372 }
352 373 #endif /* CONFIG_KPROBES */
353 374 #endif /* _LINUX_KPROBES_H */
... ... @@ -328,7 +328,7 @@
328 328 struct kprobe *kp;
329 329  
330 330 list_for_each_entry_rcu(kp, &p->list, list) {
331   - if (kp->pre_handler && !kprobe_gone(kp)) {
  331 + if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
332 332 set_kprobe_instance(kp);
333 333 if (kp->pre_handler(kp, regs))
334 334 return 1;
... ... @@ -344,7 +344,7 @@
344 344 struct kprobe *kp;
345 345  
346 346 list_for_each_entry_rcu(kp, &p->list, list) {
347   - if (kp->post_handler && !kprobe_gone(kp)) {
  347 + if (kp->post_handler && likely(!kprobe_disabled(kp))) {
348 348 set_kprobe_instance(kp);
349 349 kp->post_handler(kp, regs, flags);
350 350 reset_kprobe_instance();
... ... @@ -523,6 +523,7 @@
523 523 */
524 524 static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
525 525 {
  526 + BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
526 527 if (p->break_handler) {
527 528 if (ap->break_handler)
528 529 return -EEXIST;
... ... @@ -532,6 +533,13 @@
532 533 list_add_rcu(&p->list, &ap->list);
533 534 if (p->post_handler && !ap->post_handler)
534 535 ap->post_handler = aggr_post_handler;
  536 +
  537 + if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
  538 + ap->flags &= ~KPROBE_FLAG_DISABLED;
  539 + if (!kprobes_all_disarmed)
  540 + /* Arm the breakpoint again. */
  541 + arch_arm_kprobe(ap);
  542 + }
535 543 return 0;
536 544 }
537 545  
538 546  
539 547  
540 548  
... ... @@ -592,20 +600,36 @@
592 600 * freed by unregister_kprobe.
593 601 */
594 602 return ret;
595   - /* Clear gone flag to prevent allocating new slot again. */
596   - ap->flags &= ~KPROBE_FLAG_GONE;
  603 +
597 604 /*
598   - * If the old_p has gone, its breakpoint has been disarmed.
599   - * We have to arm it again after preparing real kprobes.
  605 + * Clear gone flag to prevent allocating new slot again, and
  606 + * set disabled flag because it is not armed yet.
600 607 */
601   - if (!kprobes_all_disarmed)
602   - arch_arm_kprobe(ap);
  608 + ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
  609 + | KPROBE_FLAG_DISABLED;
603 610 }
604 611  
605 612 copy_kprobe(ap, p);
606 613 return add_new_kprobe(ap, p);
607 614 }
608 615  
  616 +/* Try to disable aggr_kprobe, and return 1 if succeeded.*/
  617 +static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
  618 +{
  619 + struct kprobe *kp;
  620 +
  621 + list_for_each_entry_rcu(kp, &p->list, list) {
  622 + if (!kprobe_disabled(kp))
  623 + /*
  624 + * There is an active probe on the list.
  625 + * We can't disable aggr_kprobe.
  626 + */
  627 + return 0;
  628 + }
  629 + p->flags |= KPROBE_FLAG_DISABLED;
  630 + return 1;
  631 +}
  632 +
609 633 static int __kprobes in_kprobes_functions(unsigned long addr)
610 634 {
611 635 struct kprobe_blackpoint *kb;
... ... @@ -664,7 +688,9 @@
664 688 return -EINVAL;
665 689 }
666 690  
667   - p->flags = 0;
  691 + /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
  692 + p->flags &= KPROBE_FLAG_DISABLED;
  693 +
668 694 /*
669 695 * Check if are we probing a module.
670 696 */
... ... @@ -709,7 +735,7 @@
709 735 hlist_add_head_rcu(&p->hlist,
710 736 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
711 737  
712   - if (!kprobes_all_disarmed)
  738 + if (!kprobes_all_disarmed && !kprobe_disabled(p))
713 739 arch_arm_kprobe(p);
714 740  
715 741 out_unlock_text:
716 742  
717 743  
718 744  
... ... @@ -724,25 +750,37 @@
724 750 }
725 751 EXPORT_SYMBOL_GPL(register_kprobe);
726 752  
727   -/*
728   - * Unregister a kprobe without a scheduler synchronization.
729   - */
730   -static int __kprobes __unregister_kprobe_top(struct kprobe *p)
  753 +/* Check passed kprobe is valid and return kprobe in kprobe_table. */
  754 +static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
731 755 {
732 756 struct kprobe *old_p, *list_p;
733 757  
734 758 old_p = get_kprobe(p->addr);
735 759 if (unlikely(!old_p))
736   - return -EINVAL;
  760 + return NULL;
737 761  
738 762 if (p != old_p) {
739 763 list_for_each_entry_rcu(list_p, &old_p->list, list)
740 764 if (list_p == p)
741 765 /* kprobe p is a valid probe */
742   - goto valid_p;
743   - return -EINVAL;
  766 + goto valid;
  767 + return NULL;
744 768 }
745   -valid_p:
  769 +valid:
  770 + return old_p;
  771 +}
  772 +
  773 +/*
  774 + * Unregister a kprobe without a scheduler synchronization.
  775 + */
  776 +static int __kprobes __unregister_kprobe_top(struct kprobe *p)
  777 +{
  778 + struct kprobe *old_p, *list_p;
  779 +
  780 + old_p = __get_valid_kprobe(p);
  781 + if (old_p == NULL)
  782 + return -EINVAL;
  783 +
746 784 if (old_p == p ||
747 785 (old_p->pre_handler == aggr_pre_handler &&
748 786 list_is_singular(&old_p->list))) {
... ... @@ -751,7 +789,7 @@
751 789 * enabled and not gone - otherwise, the breakpoint would
752 790 * already have been removed. We save on flushing icache.
753 791 */
754   - if (!kprobes_all_disarmed && !kprobe_gone(old_p)) {
  792 + if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) {
755 793 mutex_lock(&text_mutex);
756 794 arch_disarm_kprobe(p);
757 795 mutex_unlock(&text_mutex);
... ... @@ -769,6 +807,11 @@
769 807 }
770 808 noclean:
771 809 list_del_rcu(&p->list);
  810 + if (!kprobe_disabled(old_p)) {
  811 + try_to_disable_aggr_kprobe(old_p);
  812 + if (!kprobes_all_disarmed && kprobe_disabled(old_p))
  813 + arch_disarm_kprobe(old_p);
  814 + }
772 815 }
773 816 return 0;
774 817 }
... ... @@ -1078,6 +1121,7 @@
1078 1121 static void __kprobes kill_kprobe(struct kprobe *p)
1079 1122 {
1080 1123 struct kprobe *kp;
  1124 +
1081 1125 p->flags |= KPROBE_FLAG_GONE;
1082 1126 if (p->pre_handler == aggr_pre_handler) {
1083 1127 /*
1084 1128  
... ... @@ -1219,12 +1263,18 @@
1219 1263 else
1220 1264 kprobe_type = "k";
1221 1265 if (sym)
1222   - seq_printf(pi, "%p %s %s+0x%x %s %s\n", p->addr, kprobe_type,
1223   - sym, offset, (modname ? modname : " "),
1224   - (kprobe_gone(p) ? "[GONE]" : ""));
  1266 + seq_printf(pi, "%p %s %s+0x%x %s %s%s\n",
  1267 + p->addr, kprobe_type, sym, offset,
  1268 + (modname ? modname : " "),
  1269 + (kprobe_gone(p) ? "[GONE]" : ""),
  1270 + ((kprobe_disabled(p) && !kprobe_gone(p)) ?
  1271 + "[DISABLED]" : ""));
1225 1272 else
1226   - seq_printf(pi, "%p %s %p %s\n", p->addr, kprobe_type, p->addr,
1227   - (kprobe_gone(p) ? "[GONE]" : ""));
  1273 + seq_printf(pi, "%p %s %p %s%s\n",
  1274 + p->addr, kprobe_type, p->addr,
  1275 + (kprobe_gone(p) ? "[GONE]" : ""),
  1276 + ((kprobe_disabled(p) && !kprobe_gone(p)) ?
  1277 + "[DISABLED]" : ""));
1228 1278 }
1229 1279  
1230 1280 static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
... ... @@ -1289,6 +1339,71 @@
1289 1339 .release = seq_release,
1290 1340 };
1291 1341  
  1342 +/* Disable one kprobe */
  1343 +int __kprobes disable_kprobe(struct kprobe *kp)
  1344 +{
  1345 + int ret = 0;
  1346 + struct kprobe *p;
  1347 +
  1348 + mutex_lock(&kprobe_mutex);
  1349 +
  1350 + /* Check whether specified probe is valid. */
  1351 + p = __get_valid_kprobe(kp);
  1352 + if (unlikely(p == NULL)) {
  1353 + ret = -EINVAL;
  1354 + goto out;
  1355 + }
  1356 +
  1357 + /* If the probe is already disabled (or gone), just return */
  1358 + if (kprobe_disabled(kp))
  1359 + goto out;
  1360 +
  1361 + kp->flags |= KPROBE_FLAG_DISABLED;
  1362 + if (p != kp)
  1363 + /* When kp != p, p is always enabled. */
  1364 + try_to_disable_aggr_kprobe(p);
  1365 +
  1366 + if (!kprobes_all_disarmed && kprobe_disabled(p))
  1367 + arch_disarm_kprobe(p);
  1368 +out:
  1369 + mutex_unlock(&kprobe_mutex);
  1370 + return ret;
  1371 +}
  1372 +EXPORT_SYMBOL_GPL(disable_kprobe);
  1373 +
  1374 +/* Enable one kprobe */
  1375 +int __kprobes enable_kprobe(struct kprobe *kp)
  1376 +{
  1377 + int ret = 0;
  1378 + struct kprobe *p;
  1379 +
  1380 + mutex_lock(&kprobe_mutex);
  1381 +
  1382 + /* Check whether specified probe is valid. */
  1383 + p = __get_valid_kprobe(kp);
  1384 + if (unlikely(p == NULL)) {
  1385 + ret = -EINVAL;
  1386 + goto out;
  1387 + }
  1388 +
  1389 + if (kprobe_gone(kp)) {
  1390 + /* This kprobe has gone, we couldn't enable it. */
  1391 + ret = -EINVAL;
  1392 + goto out;
  1393 + }
  1394 +
  1395 + if (!kprobes_all_disarmed && kprobe_disabled(p))
  1396 + arch_arm_kprobe(p);
  1397 +
  1398 + p->flags &= ~KPROBE_FLAG_DISABLED;
  1399 + if (p != kp)
  1400 + kp->flags &= ~KPROBE_FLAG_DISABLED;
  1401 +out:
  1402 + mutex_unlock(&kprobe_mutex);
  1403 + return ret;
  1404 +}
  1405 +EXPORT_SYMBOL_GPL(enable_kprobe);
  1406 +
1292 1407 static void __kprobes arm_all_kprobes(void)
1293 1408 {
1294 1409 struct hlist_head *head;
... ... @@ -1306,7 +1421,7 @@
1306 1421 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
1307 1422 head = &kprobe_table[i];
1308 1423 hlist_for_each_entry_rcu(p, node, head, hlist)
1309   - if (!kprobe_gone(p))
  1424 + if (!kprobe_disabled(p))
1310 1425 arch_arm_kprobe(p);
1311 1426 }
1312 1427 mutex_unlock(&text_mutex);
... ... @@ -1338,7 +1453,7 @@
1338 1453 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
1339 1454 head = &kprobe_table[i];
1340 1455 hlist_for_each_entry_rcu(p, node, head, hlist) {
1341   - if (!arch_trampoline_kprobe(p) && !kprobe_gone(p))
  1456 + if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
1342 1457 arch_disarm_kprobe(p);
1343 1458 }
1344 1459 }