Commit f38787f4f921222d080d976ef59210ce3c6c6cb4

Authored by Ingo Molnar

Merge branch 'uprobes/core' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc into perf/urgent

Pull various uprobes bugfixes from Oleg Nesterov - mostly race and
failure path fixes.

Signed-off-by: Ingo Molnar <mingo@kernel.org>

4 changed files:

arch/x86/kernel/signal.c
@@ -824,10 +824,8 @@
 		mce_notify_process();
 #endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
 
-	if (thread_info_flags & _TIF_UPROBE) {
-		clear_thread_flag(TIF_UPROBE);
+	if (thread_info_flags & _TIF_UPROBE)
 		uprobe_notify_resume(regs);
-	}
 
 	/* deal with pending signal delivery */
 	if (thread_info_flags & _TIF_SIGPENDING)
arch/x86/kernel/uprobes.c
@@ -651,29 +651,17 @@
 
 /*
  * Skip these instructions as per the currently known x86 ISA.
- * 0x66* { 0x90 | 0x0f 0x1f | 0x0f 0x19 | 0x87 0xc0 }
+ * rep=0x66*; nop=0x90
  */
 static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {
 	int i;
 
 	for (i = 0; i < MAX_UINSN_BYTES; i++) {
-		if ((auprobe->insn[i] == 0x66))
+		if (auprobe->insn[i] == 0x66)
 			continue;
 
 		if (auprobe->insn[i] == 0x90)
-			return true;
-
-		if (i == (MAX_UINSN_BYTES - 1))
-			break;
-
-		if ((auprobe->insn[i] == 0x0f) && (auprobe->insn[i+1] == 0x1f))
-			return true;
-
-		if ((auprobe->insn[i] == 0x0f) && (auprobe->insn[i+1] == 0x19))
-			return true;
-
-		if ((auprobe->insn[i] == 0x87) && (auprobe->insn[i+1] == 0xc0))
 			return true;
 
 		break;
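[Note: the simplification drops the 0x0f 0x1f, 0x0f 0x19, and 0x87 0xc0 forms and keeps only an optional run of 0x66 prefixes followed by the single-byte 0x90 nop. A standalone userspace sketch of the new loop, assuming MAX_UINSN_BYTES is 16 as on x86; the helper name and test vectors are illustrative, not part of the commit:]

#include <stdbool.h>
#include <stdio.h>

#define MAX_UINSN_BYTES 16	/* assumption: the x86 value */

/* Mirrors the simplified __skip_sstep(): skip any number of 0x66
 * prefixes, then require a plain 0x90 nop. */
static bool is_skippable_nop(const unsigned char *insn)
{
	int i;

	for (i = 0; i < MAX_UINSN_BYTES; i++) {
		if (insn[i] == 0x66)
			continue;
		return insn[i] == 0x90;
	}
	return false;
}

int main(void)
{
	unsigned char osp_nop[MAX_UINSN_BYTES] = { 0x66, 0x90 }; /* 2-byte nop: still skipped */
	unsigned char xchg_ax[MAX_UINSN_BYTES] = { 0x87, 0xc0 }; /* xchg %eax,%eax: no longer skipped */

	printf("%d %d\n", is_skippable_nop(osp_nop), is_skippable_nop(xchg_ax)); /* 1 0 */
	return 0;
}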
include/linux/uprobes.h
@@ -35,16 +35,6 @@
 # include <asm/uprobes.h>
 #endif
 
-/* flags that denote/change uprobes behaviour */
-
-/* Have a copy of original instruction */
-#define UPROBE_COPY_INSN	0x1
-
-/* Dont run handlers when first register/ last unregister in progress*/
-#define UPROBE_RUN_HANDLER	0x2
-/* Can skip singlestep */
-#define UPROBE_SKIP_SSTEP	0x4
-
 struct uprobe_consumer {
 	int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
 	/*
@@ -59,7 +49,6 @@
 #ifdef CONFIG_UPROBES
 enum uprobe_task_state {
 	UTASK_RUNNING,
-	UTASK_BP_HIT,
 	UTASK_SSTEP,
 	UTASK_SSTEP_ACK,
 	UTASK_SSTEP_TRAPPED,
kernel/events/uprobes.c
@@ -78,15 +78,23 @@
  */
 static atomic_t uprobe_events = ATOMIC_INIT(0);
 
+/* Have a copy of original instruction */
+#define UPROBE_COPY_INSN	0
+/* Dont run handlers when first register/ last unregister in progress*/
+#define UPROBE_RUN_HANDLER	1
+/* Can skip singlestep */
+#define UPROBE_SKIP_SSTEP	2
+
 struct uprobe {
 	struct rb_node		rb_node;	/* node in the rb tree */
 	atomic_t		ref;
 	struct rw_semaphore	consumer_rwsem;
+	struct mutex		copy_mutex;	/* TODO: kill me and UPROBE_COPY_INSN */
 	struct list_head	pending_list;
 	struct uprobe_consumer	*consumers;
 	struct inode		*inode;		/* Also hold a ref to inode */
 	loff_t			offset;
-	int			flags;
+	unsigned long		flags;
 	struct arch_uprobe	arch;
 };
 
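[Note: the constants change meaning on the move into uprobes.c. In the old header they were bit masks (0x1, 0x2, 0x4) combined with |= and &; here they are bit numbers (0, 1, 2) to be passed to the atomic set_bit()/test_bit()/clear_bit() helpers used throughout the rest of the patch. A minimal userspace illustration of the difference, with a plain shift standing in for the kernel's set_bit():]

#include <stdio.h>

#define UPROBE_SKIP_SSTEP_MASK	0x4	/* old style: a mask, OR-ed in directly */
#define UPROBE_SKIP_SSTEP_BIT	2	/* new style: a bit number for set_bit() */

int main(void)
{
	unsigned long flags = 0;

	flags |= UPROBE_SKIP_SSTEP_MASK;	/* old: non-atomic read-modify-write */
	printf("mask style:       %#lx\n", flags);

	flags = 0;
	flags |= 1UL << UPROBE_SKIP_SSTEP_BIT;	/* the bit set_bit(2, &flags) sets, atomically */
	printf("bit-number style: %#lx\n", flags);

	return 0;	/* both print 0x4: same bit, race-free updates in the new style */
}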
@@ -100,17 +108,12 @@
  */
 static bool valid_vma(struct vm_area_struct *vma, bool is_register)
 {
-	if (!vma->vm_file)
-		return false;
+	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_SHARED;
 
-	if (!is_register)
-		return true;
+	if (is_register)
+		flags |= VM_WRITE;
 
-	if ((vma->vm_flags & (VM_HUGETLB|VM_READ|VM_WRITE|VM_EXEC|VM_SHARED))
-				== (VM_READ|VM_EXEC))
-		return true;
-
-	return false;
+	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
 }
 
 static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
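[Note: the rewritten valid_vma() collapses the old chain of tests into one mask comparison: among VM_HUGETLB, VM_MAYEXEC, VM_SHARED (plus VM_WRITE when registering), exactly VM_MAYEXEC may be set. A small sketch of the predicate with stand-in VM_* values; the real constants come from <linux/mm.h>:]

#include <stdbool.h>
#include <stdio.h>

/* stand-in values for illustration only */
#define VM_WRITE	0x02
#define VM_SHARED	0x08
#define VM_MAYEXEC	0x40
#define VM_HUGETLB	0x400000

static bool valid_flags(unsigned long vm_flags, bool is_register)
{
	unsigned long flags = VM_HUGETLB | VM_MAYEXEC | VM_SHARED;

	if (is_register)
		flags |= VM_WRITE;

	/* of the masked bits, VM_MAYEXEC must be set and the rest clear */
	return (vm_flags & flags) == VM_MAYEXEC;
}

int main(void)
{
	printf("%d\n", valid_flags(VM_MAYEXEC, true));			/* 1: private r-x file map */
	printf("%d\n", valid_flags(VM_MAYEXEC | VM_WRITE, true));	/* 0: writable, can't register */
	printf("%d\n", valid_flags(VM_MAYEXEC | VM_WRITE, false));	/* 1: unregister tolerates it */
	printf("%d\n", valid_flags(VM_MAYEXEC | VM_SHARED, true));	/* 0: shared mapping */
	return 0;
}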
@@ -193,19 +196,44 @@
 	return *insn == UPROBE_SWBP_INSN;
 }
 
+static void copy_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *opcode)
+{
+	void *kaddr = kmap_atomic(page);
+	memcpy(opcode, kaddr + (vaddr & ~PAGE_MASK), UPROBE_SWBP_INSN_SIZE);
+	kunmap_atomic(kaddr);
+}
+
+static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
+{
+	uprobe_opcode_t old_opcode;
+	bool is_swbp;
+
+	copy_opcode(page, vaddr, &old_opcode);
+	is_swbp = is_swbp_insn(&old_opcode);
+
+	if (is_swbp_insn(new_opcode)) {
+		if (is_swbp)		/* register: already installed? */
+			return 0;
+	} else {
+		if (!is_swbp)		/* unregister: was it changed by us? */
+			return 0;
+	}
+
+	return 1;
+}
+
 /*
  * NOTE:
  * Expect the breakpoint instruction to be the smallest size instruction for
  * the architecture. If an arch has variable length instruction and the
  * breakpoint instruction is not of the smallest length instruction
- * supported by that architecture then we need to modify read_opcode /
+ * supported by that architecture then we need to modify is_swbp_at_addr and
  * write_opcode accordingly. This would never be a problem for archs that
  * have fixed length instructions.
  */
 
 /*
  * write_opcode - write the opcode at a given virtual address.
- * @auprobe: arch breakpointing information.
  * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
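[Note: verify_opcode() is what lets set_swbp()/set_orig_insn() drop their is_swbp_at_addr() checks further down: a return of 0 means the page already holds the desired opcode so the write is skipped, 1 means the write should proceed. A standalone sketch of that decision, assuming the one-byte x86 breakpoint opcode 0xcc:]

#include <stdbool.h>
#include <stdio.h>

#define SWBP_INSN 0xcc	/* assumption: x86 int3, i.e. UPROBE_SWBP_INSN on x86 */

/* Mirrors verify_opcode(): 1 = proceed with the write,
 * 0 = the page is already in the desired state. */
static int verify(unsigned char old_opcode, unsigned char new_opcode)
{
	bool is_swbp = (old_opcode == SWBP_INSN);

	if (new_opcode == SWBP_INSN) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}
	return 1;
}

int main(void)
{
	printf("%d\n", verify(0x90, SWBP_INSN));	/* install over a nop -> 1, write */
	printf("%d\n", verify(SWBP_INSN, SWBP_INSN));	/* already installed   -> 0, skip  */
	printf("%d\n", verify(0x90, 0x55));		/* already restored    -> 0, skip  */
	return 0;
}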
@@ -216,8 +244,8 @@
  * For mm @mm, write the opcode at @vaddr.
  * Return 0 (success) or a negative errno.
  */
-static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
-			unsigned long vaddr, uprobe_opcode_t opcode)
+static int write_opcode(struct mm_struct *mm, unsigned long vaddr,
+			uprobe_opcode_t opcode)
 {
 	struct page *old_page, *new_page;
 	void *vaddr_old, *vaddr_new;
@@ -226,10 +254,14 @@
 
 retry:
 	/* Read the page with vaddr into memory */
-	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
+	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
 	if (ret <= 0)
 		return ret;
 
+	ret = verify_opcode(old_page, vaddr, &opcode);
+	if (ret <= 0)
+		goto put_old;
+
 	ret = -ENOMEM;
 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
 	if (!new_page)
@@ -264,63 +296,6 @@
 }
 
 /**
- * read_opcode - read the opcode at a given virtual address.
- * @mm: the probed process address space.
- * @vaddr: the virtual address to read the opcode.
- * @opcode: location to store the read opcode.
- *
- * Called with mm->mmap_sem held (for read and with a reference to
- * mm.
- *
- * For mm @mm, read the opcode at @vaddr and store it in @opcode.
- * Return 0 (success) or a negative errno.
- */
-static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t *opcode)
-{
-	struct page *page;
-	void *vaddr_new;
-	int ret;
-
-	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
-	if (ret <= 0)
-		return ret;
-
-	vaddr_new = kmap_atomic(page);
-	vaddr &= ~PAGE_MASK;
-	memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE);
-	kunmap_atomic(vaddr_new);
-
-	put_page(page);
-
-	return 0;
-}
-
-static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
-{
-	uprobe_opcode_t opcode;
-	int result;
-
-	if (current->mm == mm) {
-		pagefault_disable();
-		result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
-							sizeof(opcode));
-		pagefault_enable();
-
-		if (likely(result == 0))
-			goto out;
-	}
-
-	result = read_opcode(mm, vaddr, &opcode);
-	if (result)
-		return result;
-out:
-	if (is_swbp_insn(&opcode))
-		return 1;
-
-	return 0;
-}
-
-/**
  * set_swbp - store breakpoint at a given address.
  * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
@@ -331,18 +306,7 @@
  */
 int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 {
-	int result;
-	/*
-	 * See the comment near uprobes_hash().
-	 */
-	result = is_swbp_at_addr(mm, vaddr);
-	if (result == 1)
-		return 0;
-
-	if (result)
-		return result;
-
-	return write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
+	return write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
 }
 
 /**
@@ -357,16 +321,7 @@
 int __weak
 set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 {
-	int result;
-
-	result = is_swbp_at_addr(mm, vaddr);
-	if (!result)
-		return -EINVAL;
-
-	if (result != 1)
-		return result;
-
-	return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
+	return write_opcode(mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
 }
 
 static int match_uprobe(struct uprobe *l, struct uprobe *r)
@@ -473,7 +428,7 @@
 	spin_unlock(&uprobes_treelock);
 
 	/* For now assume that the instruction need not be single-stepped */
-	uprobe->flags |= UPROBE_SKIP_SSTEP;
+	__set_bit(UPROBE_SKIP_SSTEP, &uprobe->flags);
 
 	return u;
 }
@@ -495,6 +450,7 @@
 	uprobe->inode = igrab(inode);
 	uprobe->offset = offset;
 	init_rwsem(&uprobe->consumer_rwsem);
+	mutex_init(&uprobe->copy_mutex);
 
 	/* add to uprobes_tree, sorted on inode:offset */
 	cur_uprobe = insert_uprobe(uprobe);
@@ -515,7 +471,7 @@
 {
 	struct uprobe_consumer *uc;
 
-	if (!(uprobe->flags & UPROBE_RUN_HANDLER))
+	if (!test_bit(UPROBE_RUN_HANDLER, &uprobe->flags))
 		return;
 
 	down_read(&uprobe->consumer_rwsem);
@@ -621,29 +577,43 @@
 	return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset);
 }
 
-/*
- * How mm->uprobes_state.count gets updated
- * uprobe_mmap() increments the count if
- *	- it successfully adds a breakpoint.
- *	- it cannot add a breakpoint, but sees that there is a underlying
- *	  breakpoint (via a is_swbp_at_addr()).
- *
- * uprobe_munmap() decrements the count if
- *	- it sees a underlying breakpoint, (via is_swbp_at_addr)
- *	  (Subsequent uprobe_unregister wouldnt find the breakpoint
- *	  unless a uprobe_mmap kicks in, since the old vma would be
- *	  dropped just after uprobe_munmap.)
- *
- * uprobe_register increments the count if:
- *	- it successfully adds a breakpoint.
- *
- * uprobe_unregister decrements the count if:
- *	- it sees a underlying breakpoint and removes successfully.
- *	  (via is_swbp_at_addr)
- *	  (Subsequent uprobe_munmap wouldnt find the breakpoint
- *	  since there is no underlying breakpoint after the
- *	  breakpoint removal.)
- */
+static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
+				struct mm_struct *mm, unsigned long vaddr)
+{
+	int ret = 0;
+
+	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
+		return ret;
+
+	mutex_lock(&uprobe->copy_mutex);
+	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
+		goto out;
+
+	ret = copy_insn(uprobe, file);
+	if (ret)
+		goto out;
+
+	ret = -ENOTSUPP;
+	if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
+		goto out;
+
+	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
+	if (ret)
+		goto out;
+
+	/* write_opcode() assumes we don't cross page boundary */
+	BUG_ON((uprobe->offset & ~PAGE_MASK) +
+			UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
+
+	smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
+	set_bit(UPROBE_COPY_INSN, &uprobe->flags);
+
+ out:
+	mutex_unlock(&uprobe->copy_mutex);
+
+	return ret;
+}
+
 static int
 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 			struct vm_area_struct *vma, unsigned long vaddr)
 
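[Note: prepare_uprobe() is double-checked locking around the one-time copy and analysis of the probed instruction: a lockless test_bit() fast path, a recheck under copy_mutex, and an smp_wmb() so the copied instruction becomes visible before the UPROBE_COPY_INSN bit does (paired with the smp_rmb() added to handle_swbp() further down). A compressed sketch of the pattern, with C11 atomics and a pthread mutex standing in for the kernel primitives; all names here are illustrative:]

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <string.h>

/* One-time initialization published via a flag bit. */
struct probe {
	unsigned char insn[16];
	atomic_bool copied;		/* plays the role of UPROBE_COPY_INSN */
	pthread_mutex_t copy_mutex;
};

static int prepare(struct probe *p, const unsigned char *src, size_t len)
{
	int ret = 0;

	/* lockless fast path */
	if (atomic_load_explicit(&p->copied, memory_order_acquire))
		return 0;

	pthread_mutex_lock(&p->copy_mutex);
	if (atomic_load_explicit(&p->copied, memory_order_relaxed))
		goto out;	/* somebody else won the race */

	memcpy(p->insn, src, len);	/* the copy_insn()/analyze step */

	/* release ordering publishes insn[] before the flag, like the
	 * smp_wmb() before set_bit(UPROBE_COPY_INSN) above */
	atomic_store_explicit(&p->copied, true, memory_order_release);
out:
	pthread_mutex_unlock(&p->copy_mutex);
	return ret;
}

int main(void)
{
	struct probe p = { .copy_mutex = PTHREAD_MUTEX_INITIALIZER };
	unsigned char nop = 0x90;

	prepare(&p, &nop, 1);
	return 0;
}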
@@ -661,25 +631,10 @@
 	if (!uprobe->consumers)
 		return 0;
 
-	if (!(uprobe->flags & UPROBE_COPY_INSN)) {
-		ret = copy_insn(uprobe, vma->vm_file);
-		if (ret)
-			return ret;
+	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
+	if (ret)
+		return ret;
 
-		if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
-			return -ENOTSUPP;
-
-		ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
-		if (ret)
-			return ret;
-
-		/* write_opcode() assumes we don't cross page boundary */
-		BUG_ON((uprobe->offset & ~PAGE_MASK) +
-				UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
-
-		uprobe->flags |= UPROBE_COPY_INSN;
-	}
-
 	/*
 	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
 	 * the task can hit this breakpoint right after __replace_page().
 
@@ -697,15 +652,15 @@
 	return ret;
 }
 
-static void
+static int
 remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
 {
 	/* can happen if uprobe_register() fails */
 	if (!test_bit(MMF_HAS_UPROBES, &mm->flags))
-		return;
+		return 0;
 
 	set_bit(MMF_RECALC_UPROBES, &mm->flags);
-	set_orig_insn(&uprobe->arch, mm, vaddr);
+	return set_orig_insn(&uprobe->arch, mm, vaddr);
 }
 
 /*
@@ -820,7 +775,7 @@
 		struct mm_struct *mm = info->mm;
 		struct vm_area_struct *vma;
 
-		if (err)
+		if (err && is_register)
 			goto free;
 
 		down_write(&mm->mmap_sem);
@@ -836,7 +791,7 @@
 		if (is_register)
 			err = install_breakpoint(uprobe, mm, vma, info->vaddr);
 		else
-			remove_breakpoint(uprobe, mm, info->vaddr);
+			err |= remove_breakpoint(uprobe, mm, info->vaddr);
 
  unlock:
 		up_write(&mm->mmap_sem);
 
@@ -893,13 +848,15 @@
 	mutex_lock(uprobes_hash(inode));
 	uprobe = alloc_uprobe(inode, offset);
 
-	if (uprobe && !consumer_add(uprobe, uc)) {
+	if (!uprobe) {
+		ret = -ENOMEM;
+	} else if (!consumer_add(uprobe, uc)) {
 		ret = __uprobe_register(uprobe);
 		if (ret) {
 			uprobe->consumers = NULL;
 			__uprobe_unregister(uprobe);
 		} else {
-			uprobe->flags |= UPROBE_RUN_HANDLER;
+			set_bit(UPROBE_RUN_HANDLER, &uprobe->flags);
 		}
 	}
 
@@ -932,7 +889,7 @@
 	if (consumer_del(uprobe, uc)) {
 		if (!uprobe->consumers) {
 			__uprobe_unregister(uprobe);
-			uprobe->flags &= ~UPROBE_RUN_HANDLER;
+			clear_bit(UPROBE_RUN_HANDLER, &uprobe->flags);
 		}
 	}
 
@@ -1393,10 +1350,11 @@
  */
 static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
 {
-	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
-		return true;
-
-	uprobe->flags &= ~UPROBE_SKIP_SSTEP;
+	if (test_bit(UPROBE_SKIP_SSTEP, &uprobe->flags)) {
+		if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
+			return true;
+		clear_bit(UPROBE_SKIP_SSTEP, &uprobe->flags);
+	}
 	return false;
 }
 
@@ -1419,6 +1377,30 @@
 		clear_bit(MMF_HAS_UPROBES, &mm->flags);
 }
 
+static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
+{
+	struct page *page;
+	uprobe_opcode_t opcode;
+	int result;
+
+	pagefault_disable();
+	result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
+							sizeof(opcode));
+	pagefault_enable();
+
+	if (likely(result == 0))
+		goto out;
+
+	result = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
+	if (result < 0)
+		return result;
+
+	copy_opcode(page, vaddr, &opcode);
+	put_page(page);
+ out:
+	return is_swbp_insn(&opcode);
+}
+
 static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
 {
 	struct mm_struct *mm = current->mm;
 
@@ -1489,38 +1471,41 @@
 		}
 		return;
 	}
+	/*
+	 * TODO: move copy_insn/etc into _register and remove this hack.
+	 * After we hit the bp, _unregister + _register can install the
+	 * new and not-yet-analyzed uprobe at the same address, restart.
+	 */
+	smp_rmb(); /* pairs with wmb() in install_breakpoint() */
+	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
+		goto restart;
 
 	utask = current->utask;
 	if (!utask) {
 		utask = add_utask();
 		/* Cannot allocate; re-execute the instruction. */
 		if (!utask)
-			goto cleanup_ret;
+			goto restart;
 	}
-	utask->active_uprobe = uprobe;
+
 	handler_chain(uprobe, regs);
-	if (uprobe->flags & UPROBE_SKIP_SSTEP && can_skip_sstep(uprobe, regs))
-		goto cleanup_ret;
+	if (can_skip_sstep(uprobe, regs))
+		goto out;
 
-	utask->state = UTASK_SSTEP;
 	if (!pre_ssout(uprobe, regs, bp_vaddr)) {
 		arch_uprobe_enable_step(&uprobe->arch);
+		utask->active_uprobe = uprobe;
+		utask->state = UTASK_SSTEP;
 		return;
 	}
 
-cleanup_ret:
-	if (utask) {
-		utask->active_uprobe = NULL;
-		utask->state = UTASK_RUNNING;
-	}
-	if (!(uprobe->flags & UPROBE_SKIP_SSTEP))
-
-		/*
-		 * cannot singlestep; cannot skip instruction;
-		 * re-execute the instruction.
-		 */
-		instruction_pointer_set(regs, bp_vaddr);
-
+restart:
+	/*
+	 * cannot singlestep; cannot skip instruction;
+	 * re-execute the instruction.
+	 */
+	instruction_pointer_set(regs, bp_vaddr);
+out:
 	put_uprobe(uprobe);
 }
 
@@ -1552,13 +1537,12 @@
 }
 
 /*
- * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag. (and on
- * subsequent probe hits on the thread sets the state to UTASK_BP_HIT) and
- * allows the thread to return from interrupt.
+ * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
+ * allows the thread to return from interrupt. After that handle_swbp()
+ * sets utask->active_uprobe.
  *
- * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag and
- * also sets the state to UTASK_SSTEP_ACK and allows the thread to return from
- * interrupt.
+ * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
+ * and allows the thread to return from interrupt.
 *
 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
 * uprobe_notify_resume().
@@ -1567,11 +1551,13 @@
 {
 	struct uprobe_task *utask;
 
+	clear_thread_flag(TIF_UPROBE);
+
 	utask = current->utask;
-	if (!utask || utask->state == UTASK_BP_HIT)
-		handle_swbp(regs);
-	else
+	if (utask && utask->active_uprobe)
 		handle_singlestep(utask, regs);
+	else
+		handle_swbp(regs);
 }
 
 /*
@@ -1580,17 +1566,10 @@
  */
 int uprobe_pre_sstep_notifier(struct pt_regs *regs)
 {
-	struct uprobe_task *utask;
-
 	if (!current->mm || !test_bit(MMF_HAS_UPROBES, &current->mm->flags))
 		return 0;
 
-	utask = current->utask;
-	if (utask)
-		utask->state = UTASK_BP_HIT;
-
 	set_thread_flag(TIF_UPROBE);
-
 	return 1;
 }