Commit 3ed4c0583daa34dedb568b26ff99e5a7b58db612
Exists in master and in 4 other branches
Merge branch 'ptrace' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc
* 'ptrace' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc: (41 commits)
  signal: trivial, fix the "timespec declared inside parameter list" warning
  job control: reorganize wait_task_stopped()
  ptrace: fix signal->wait_chldexit usage in task_clear_group_stop_trapping()
  signal: sys_sigprocmask() needs retarget_shared_pending()
  signal: cleanup sys_sigprocmask()
  signal: rename signandsets() to sigandnsets()
  signal: do_sigtimedwait() needs retarget_shared_pending()
  signal: introduce do_sigtimedwait() to factor out compat/native code
  signal: sys_rt_sigtimedwait: simplify the timeout logic
  signal: cleanup sys_rt_sigprocmask()
  x86: signal: sys_rt_sigreturn() should use set_current_blocked()
  x86: signal: handle_signal() should use set_current_blocked()
  signal: sigprocmask() should do retarget_shared_pending()
  signal: sigprocmask: narrow the scope of ->siglock
  signal: retarget_shared_pending: optimize while_each_thread() loop
  signal: retarget_shared_pending: consider shared/unblocked signals only
  signal: introduce retarget_shared_pending()
  ptrace: ptrace_check_attach() should not do s/STOPPED/TRACED/
  signal: Turn SIGNAL_STOP_DEQUEUED into GROUP_STOP_DEQUEUED
  signal: do_signal_stop: Remove the unneeded task_clear_group_stop_pending()
  ...
Showing 9 changed files (side-by-side diff)
arch/x86/kernel/signal.c
... | ... | @@ -601,10 +601,7 @@ |
601 | 601 | goto badframe; |
602 | 602 | |
603 | 603 | sigdelsetmask(&set, ~_BLOCKABLE); |
604 | - spin_lock_irq(&current->sighand->siglock); | 
605 | - current->blocked = set; | |
606 | - recalc_sigpending(); | |
607 | - spin_unlock_irq(&current->sighand->siglock); | 
604 | + set_current_blocked(&set); | |
608 | 605 | |
609 | 606 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) |
610 | 607 | goto badframe; |
... | ... | @@ -682,6 +679,7 @@ |
682 | 679 | handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, |
683 | 680 | sigset_t *oldset, struct pt_regs *regs) |
684 | 681 | { |
682 | + sigset_t blocked; | |
685 | 683 | int ret; |
686 | 684 | |
687 | 685 | /* Are we from a system call? */ |
688 | 686 | |
... | ... | @@ -741,12 +739,10 @@ |
741 | 739 | */ |
742 | 740 | regs->flags &= ~X86_EFLAGS_TF; |
743 | 741 | |
744 | - spin_lock_irq(&current->sighand->siglock); | 
745 | - sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); | 
742 | + sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask); | 
746 | 743 | if (!(ka->sa.sa_flags & SA_NODEFER)) |
747 | - sigaddset(&current->blocked, sig); | 
748 | - recalc_sigpending(); | |
749 | - spin_unlock_irq(&current->sighand->siglock); | 
744 | + sigaddset(&blocked, sig); | |
745 | + set_current_blocked(&blocked); | |
750 | 746 | |
751 | 747 | tracehook_signal_handler(sig, info, ka, regs, |
752 | 748 | test_thread_flag(TIF_SINGLESTEP)); |
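
The hunks above replace the open-coded siglock / recalc_sigpending() sequence with set_current_blocked(), computing the handler's new mask on the stack first. A stand-alone sketch of that mask computation, reduced to a single-word model of sigset_t (the helper name is hypothetical; SA_NODEFER matches the Linux uapi value):

    #include <stdio.h>

    #define SA_NODEFER 0x40000000u

    /* Build the blocked set a handler runs with: the old mask, plus
     * sa_mask, plus the signal itself unless SA_NODEFER is set.  The
     * result would then be installed once via set_current_blocked(). */
    static unsigned long handler_blocked(unsigned long blocked,
                                         unsigned long sa_mask,
                                         int sig, unsigned int sa_flags)
    {
        unsigned long newset = blocked | sa_mask;      /* sigorsets() */
        if (!(sa_flags & SA_NODEFER))
            newset |= 1UL << (sig - 1);                /* sigaddset() */
        return newset;
    }

    int main(void)
    {
        /* SIGINT (2), empty sa_mask, no SA_NODEFER: mask gains bit 2. */
        printf("%#lx\n", handler_blocked(0x0, 0x0, 2, 0));   /* 0x2 */
        return 0;
    }
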
fs/exec.c
include/linux/sched.h
... | ... | @@ -653,9 +653,8 @@ |
653 | 653 | * Bits in flags field of signal_struct. |
654 | 654 | */ |
655 | 655 | #define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ |
656 | -#define SIGNAL_STOP_DEQUEUED 0x00000002 /* stop signal dequeued */ | |
657 | -#define SIGNAL_STOP_CONTINUED 0x00000004 /* SIGCONT since WCONTINUED reap */ | |
658 | -#define SIGNAL_GROUP_EXIT 0x00000008 /* group exit in progress */ | |
656 | +#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ | |
657 | +#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ | |
659 | 658 | /* |
660 | 659 | * Pending notifications to parent. |
661 | 660 | */ |
... | ... | @@ -1251,6 +1250,7 @@ |
1251 | 1250 | int exit_state; |
1252 | 1251 | int exit_code, exit_signal; |
1253 | 1252 | int pdeath_signal; /* The signal sent when the parent dies */ |
1253 | + unsigned int group_stop; /* GROUP_STOP_*, siglock protected */ | |
1254 | 1254 | /* ??? */ |
1255 | 1255 | unsigned int personality; |
1256 | 1256 | unsigned did_exec:1; |
... | ... | @@ -1770,6 +1770,17 @@ |
1770 | 1770 | /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ |
1771 | 1771 | #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) |
1772 | 1772 | #define used_math() tsk_used_math(current) |
1773 | + | |
1774 | +/* | |
1775 | + * task->group_stop flags | |
1776 | + */ | |
1777 | +#define GROUP_STOP_SIGMASK 0xffff /* signr of the last group stop */ | |
1778 | +#define GROUP_STOP_PENDING (1 << 16) /* task should stop for group stop */ | |
1779 | +#define GROUP_STOP_CONSUME (1 << 17) /* consume group stop count */ | |
1780 | +#define GROUP_STOP_TRAPPING (1 << 18) /* switching from STOPPED to TRACED */ | |
1781 | +#define GROUP_STOP_DEQUEUED (1 << 19) /* stop signal dequeued */ | |
1782 | + | |
1783 | +extern void task_clear_group_stop_pending(struct task_struct *task); | |
1773 | 1784 | |
1774 | 1785 | #ifdef CONFIG_PREEMPT_RCU |
1775 | 1786 |
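
A minimal stand-alone sketch of the task->group_stop encoding introduced above: the low 16 bits (GROUP_STOP_SIGMASK) record the stop signal number and the high bits carry per-task state, mirroring how do_signal_stop() records a stop (demo code, not from the patch):

    #include <stdio.h>

    #define GROUP_STOP_SIGMASK  0xffff
    #define GROUP_STOP_PENDING  (1 << 16)
    #define GROUP_STOP_CONSUME  (1 << 17)

    int main(void)
    {
        unsigned int group_stop = 0;
        int signr = 19;                       /* e.g. SIGSTOP */

        /* Record a new stop: clear the old signr, then OR in the new
         * signr together with the state flags. */
        group_stop &= ~GROUP_STOP_SIGMASK;
        group_stop |= signr | GROUP_STOP_PENDING | GROUP_STOP_CONSUME;

        printf("signr=%u pending=%d consume=%d\n",
               group_stop & GROUP_STOP_SIGMASK,
               !!(group_stop & GROUP_STOP_PENDING),
               !!(group_stop & GROUP_STOP_CONSUME));
        return 0;
    }
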
include/linux/signal.h
... | ... | @@ -125,13 +125,13 @@ |
125 | 125 | #define _sig_and(x,y) ((x) & (y)) |
126 | 126 | _SIG_SET_BINOP(sigandsets, _sig_and) |
127 | 127 | |
128 | -#define _sig_nand(x,y) ((x) & ~(y)) | |
129 | -_SIG_SET_BINOP(signandsets, _sig_nand) | |
128 | +#define _sig_andn(x,y) ((x) & ~(y)) | |
129 | +_SIG_SET_BINOP(sigandnsets, _sig_andn) | |
130 | 130 | |
131 | 131 | #undef _SIG_SET_BINOP |
132 | 132 | #undef _sig_or |
133 | 133 | #undef _sig_and |
134 | -#undef _sig_nand | |
134 | +#undef _sig_andn | |
135 | 135 | |
136 | 136 | #define _SIG_SET_OP(name, op) \ |
137 | 137 | static inline void name(sigset_t *set) \ |
... | ... | @@ -236,6 +236,9 @@ |
236 | 236 | return sig <= _NSIG ? 1 : 0; |
237 | 237 | } |
238 | 238 | |
239 | +struct timespec; | |
240 | +struct pt_regs; | |
241 | + | |
239 | 242 | extern int next_signal(struct sigpending *pending, sigset_t *mask); |
240 | 243 | extern int do_send_sig_info(int sig, struct siginfo *info, |
241 | 244 | struct task_struct *p, bool group); |
242 | 245 | |
243 | 246 | |
... | ... | @@ -244,10 +247,12 @@ |
244 | 247 | extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, |
245 | 248 | siginfo_t *info); |
246 | 249 | extern long do_sigpending(void __user *, unsigned long); |
250 | +extern int do_sigtimedwait(const sigset_t *, siginfo_t *, | |
251 | + const struct timespec *); | |
247 | 252 | extern int sigprocmask(int, sigset_t *, sigset_t *); |
253 | +extern void set_current_blocked(const sigset_t *); | |
248 | 254 | extern int show_unhandled_signals; |
249 | 255 | |
250 | -struct pt_regs; | |
251 | 256 | extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie); |
252 | 257 | extern void exit_signals(struct task_struct *tsk); |
253 | 258 |
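
The rename above reflects what the helper actually computes: sigandnsets(r, a, b) is r = a & ~b ("and-not", the signals in a that are not in b), not a NAND. A stand-alone illustration on a single-word mask (demo code, not from the patch):

    #include <stdio.h>

    /* One-word reduction of _SIG_SET_BINOP(sigandnsets, _sig_andn). */
    static void sigandnsets_demo(unsigned long *r, const unsigned long *a,
                                 const unsigned long *b)
    {
        *r = *a & ~*b;
    }

    int main(void)
    {
        unsigned long blocked = 0xE, unblock = 0x6, newset;

        /* Exactly the SIG_UNBLOCK case of sigprocmask(). */
        sigandnsets_demo(&newset, &blocked, &unblock);
        printf("%#lx\n", newset);   /* 0x8 */
        return 0;
    }
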
include/linux/tracehook.h
... | ... | @@ -469,33 +469,6 @@ |
469 | 469 | } |
470 | 470 | |
471 | 471 | /** |
472 | - * tracehook_notify_jctl - report about job control stop/continue | |
473 | - * @notify: zero, %CLD_STOPPED or %CLD_CONTINUED | |
474 | - * @why: %CLD_STOPPED or %CLD_CONTINUED | |
475 | - * | |
476 | - * This is called when we might call do_notify_parent_cldstop(). | |
477 | - * | |
478 | - * @notify is zero if we would not ordinarily send a %SIGCHLD, | |
479 | - * or is the %CLD_STOPPED or %CLD_CONTINUED .si_code for %SIGCHLD. | |
480 | - * | |
481 | - * @why is %CLD_STOPPED when about to stop for job control; | |
482 | - * we are already in %TASK_STOPPED state, about to call schedule(). | |
483 | - * It might also be that we have just exited (check %PF_EXITING), | |
484 | - * but need to report that a group-wide stop is complete. | |
485 | - * | |
486 | - * @why is %CLD_CONTINUED when waking up after job control stop and | |
487 | - * ready to make a delayed @notify report. | |
488 | - * | |
489 | - * Return the %CLD_* value for %SIGCHLD, or zero to generate no signal. | |
490 | - * | |
491 | - * Called with the siglock held. | |
492 | - */ | |
493 | -static inline int tracehook_notify_jctl(int notify, int why) | |
494 | -{ | |
495 | - return notify ?: (current->ptrace & PT_PTRACED) ? why : 0; | |
496 | -} | |
497 | - | |
498 | -/** | |
499 | 472 | * tracehook_finish_jctl - report about return from job control stop |
500 | 473 | * |
501 | 474 | * This is called by do_signal_stop() after wakeup. |
kernel/compat.c
... | ... | @@ -890,10 +890,9 @@ |
890 | 890 | { |
891 | 891 | compat_sigset_t s32; |
892 | 892 | sigset_t s; |
893 | - int sig; | |
894 | 893 | struct timespec t; |
895 | 894 | siginfo_t info; |
896 | - long ret, timeout = 0; | |
895 | + long ret; | |
897 | 896 | |
898 | 897 | if (sigsetsize != sizeof(sigset_t)) |
899 | 898 | return -EINVAL; |
... | ... | @@ -901,51 +900,19 @@ |
901 | 900 | if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t))) |
902 | 901 | return -EFAULT; |
903 | 902 | sigset_from_compat(&s, &s32); |
904 | - sigdelsetmask(&s,sigmask(SIGKILL)|sigmask(SIGSTOP)); | |
905 | - signotset(&s); | |
906 | 903 | |
907 | 904 | if (uts) { |
908 | - if (get_compat_timespec (&t, uts)) | |
905 | + if (get_compat_timespec(&t, uts)) | |
909 | 906 | return -EFAULT; |
910 | - if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 | |
911 | - || t.tv_sec < 0) | |
912 | - return -EINVAL; | |
913 | 907 | } |
914 | 908 | |
915 | - spin_lock_irq(&current->sighand->siglock); | 
916 | - sig = dequeue_signal(current, &s, &info); | |
917 | - if (!sig) { | |
918 | - timeout = MAX_SCHEDULE_TIMEOUT; | |
919 | - if (uts) | |
920 | - timeout = timespec_to_jiffies(&t) | |
921 | - +(t.tv_sec || t.tv_nsec); | |
922 | - if (timeout) { | |
923 | - current->real_blocked = current->blocked; | |
924 | - sigandsets(&current->blocked, &current->blocked, &s); | 
909 | + ret = do_sigtimedwait(&s, &info, uts ? &t : NULL); | |
925 | 910 | |
926 | - recalc_sigpending(); | |
927 | - spin_unlock_irq(&current->sighand->siglock); | 
928 | - | |
929 | - timeout = schedule_timeout_interruptible(timeout); | |
930 | - | |
931 | - spin_lock_irq(&current->sighand->siglock); | 
932 | - sig = dequeue_signal(current, &s, &info); | |
933 | - current->blocked = current->real_blocked; | |
934 | - siginitset(&current->real_blocked, 0); | 
935 | - recalc_sigpending(); | |
936 | - } | |
911 | + if (ret > 0 && uinfo) { | |
912 | + if (copy_siginfo_to_user32(uinfo, &info)) | |
913 | + ret = -EFAULT; | |
937 | 914 | } |
938 | - spin_unlock_irq(&current->sighand->siglock); | 
939 | 915 | |
940 | - if (sig) { | |
941 | - ret = sig; | |
942 | - if (uinfo) { | |
943 | - if (copy_siginfo_to_user32(uinfo, &info)) | |
944 | - ret = -EFAULT; | |
945 | - } | |
946 | - }else { | |
947 | - ret = timeout?-EINTR:-EAGAIN; | |
948 | - } | |
949 | 916 | return ret; |
950 | 917 | |
951 | 918 | } |
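
The compat syscall now forwards to do_sigtimedwait(), which returns the dequeued signal number, -EAGAIN on timeout, or -EINTR when interrupted. A runnable userspace view of the same convention through sigtimedwait(2) (illustrative; prints EAGAIN after roughly one second if no SIGUSR1 arrives):

    #include <signal.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        sigset_t set;
        siginfo_t info;
        struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
        int sig;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        /* The signal must be blocked so it stays queued for us. */
        sigprocmask(SIG_BLOCK, &set, NULL);

        sig = sigtimedwait(&set, &info, &ts);
        if (sig > 0)
            printf("got signal %d from pid %d\n", sig, (int)info.si_pid);
        else
            perror("sigtimedwait");   /* EAGAIN on timeout, EINTR otherwise */
        return 0;
    }
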
kernel/exit.c
... | ... | @@ -1377,11 +1377,23 @@ |
1377 | 1377 | return NULL; |
1378 | 1378 | } |
1379 | 1379 | |
1380 | -/* | |
1381 | - * Handle sys_wait4 work for one task in state TASK_STOPPED. We hold | |
1382 | - * read_lock(&tasklist_lock) on entry. If we return zero, we still hold | |
1383 | - * the lock and this task is uninteresting. If we return nonzero, we have | |
1384 | - * released the lock and the system call should return. | |
1380 | +/** | |
1381 | + * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED | |
1382 | + * @wo: wait options | |
1383 | + * @ptrace: is the wait for ptrace | |
1384 | + * @p: task to wait for | |
1385 | + * | |
1386 | + * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED. | |
1387 | + * | |
1388 | + * CONTEXT: | |
1389 | + * read_lock(&tasklist_lock), which is released if return value is | |
1390 | + * non-zero. Also, grabs and releases @p->sighand->siglock. | |
1391 | + * | |
1392 | + * RETURNS: | |
1393 | + * 0 if wait condition didn't exist and search for other wait conditions | |
1394 | + * should continue. Non-zero return, -errno on failure and @p's pid on | |
1395 | + * success, implies that tasklist_lock is released and wait condition | |
1396 | + * search should terminate. | |
1385 | 1397 | */ |
1386 | 1398 | static int wait_task_stopped(struct wait_opts *wo, |
1387 | 1399 | int ptrace, struct task_struct *p) |
... | ... | @@ -1397,6 +1409,9 @@ |
1397 | 1409 | if (!ptrace && !(wo->wo_flags & WUNTRACED)) |
1398 | 1410 | return 0; |
1399 | 1411 | |
1412 | + if (!task_stopped_code(p, ptrace)) | |
1413 | + return 0; | |
1414 | + | |
1400 | 1415 | exit_code = 0; |
1401 | 1416 | spin_lock_irq(&p->sighand->siglock); |
... | ... | @@ -1538,33 +1553,84 @@ |
1538 | 1553 | return 0; |
1539 | 1554 | } |
1540 | 1555 | |
1541 | - if (likely(!ptrace) && unlikely(task_ptrace(p))) { | |
1556 | + /* dead body doesn't have much to contribute */ | |
1557 | + if (p->exit_state == EXIT_DEAD) | |
1558 | + return 0; | |
1559 | + | |
1560 | + /* slay zombie? */ | |
1561 | + if (p->exit_state == EXIT_ZOMBIE) { | |
1542 | 1562 | /* |
1543 | - * This child is hidden by ptrace. | |
1544 | - * We aren't allowed to see it now, but eventually we will. | |
1563 | + * A zombie ptracee is only visible to its ptracer. | |
1564 | + * Notification and reaping will be cascaded to the real | |
1565 | + * parent when the ptracer detaches. | |
1545 | 1566 | */ |
1567 | + if (likely(!ptrace) && unlikely(task_ptrace(p))) { | |
1568 | + /* it will become visible, clear notask_error */ | |
1569 | + wo->notask_error = 0; | |
1570 | + return 0; | |
1571 | + } | |
1572 | + | |
1573 | + /* we don't reap group leaders with subthreads */ | |
1574 | + if (!delay_group_leader(p)) | |
1575 | + return wait_task_zombie(wo, p); | |
1576 | + | |
1577 | + /* | |
1578 | + * Allow access to stopped/continued state via zombie by | |
1579 | + * falling through. Clearing of notask_error is complex. | |
1580 | + * | |
1581 | + * When !@ptrace: | |
1582 | + * | |
1583 | + * If WEXITED is set, notask_error should naturally be | |
1584 | + * cleared. If not, subset of WSTOPPED|WCONTINUED is set, | |
1585 | + * so, if there are live subthreads, there are events to | |
1586 | + * wait for. If all subthreads are dead, it's still safe | |
1587 | + * to clear - this function will be called again in a finite | 
1588 | + * amount of time once all the subthreads are released and | 
1589 | + * will then return without clearing. | |
1590 | + * | |
1591 | + * When @ptrace: | |
1592 | + * | |
1593 | + * Stopped state is per-task and thus can't change once the | |
1594 | + * target task dies. Only continued and exited can happen. | |
1595 | + * Clear notask_error if WCONTINUED | WEXITED. | |
1596 | + */ | |
1597 | + if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED))) | |
1598 | + wo->notask_error = 0; | |
1599 | + } else { | |
1600 | + /* | |
1601 | + * If @p is ptraced by a task in its real parent's group, | |
1602 | + * hide group stop/continued state when looking at @p as | |
1603 | + * the real parent; otherwise, a single stop can be | |
1604 | + * reported twice as group and ptrace stops. | |
1605 | + * | |
1606 | + * If a ptracer wants to distinguish the two events for its | |
1607 | + * own children, it should create a separate process which | |
1608 | + * takes the role of real parent. | |
1609 | + */ | |
1610 | + if (likely(!ptrace) && task_ptrace(p) && | |
1611 | + same_thread_group(p->parent, p->real_parent)) | |
1612 | + return 0; | |
1613 | + | |
1614 | + /* | |
1615 | + * @p is alive and it's gonna stop, continue or exit, so | |
1616 | + * there always is something to wait for. | |
1617 | + */ | |
1546 | 1618 | wo->notask_error = 0; |
1547 | - return 0; | |
1548 | 1619 | } |
1549 | 1620 | |
1550 | - if (p->exit_state == EXIT_DEAD) | |
1551 | - return 0; | |
1552 | - | |
1553 | 1621 | /* |
1554 | - * We don't reap group leaders with subthreads. | |
1622 | + * Wait for stopped. Depending on @ptrace, different stopped state | |
1623 | + * is used and the two don't interact with each other. | |
1555 | 1624 | */ |
1556 | - if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p)) | |
1557 | - return wait_task_zombie(wo, p); | |
1625 | + ret = wait_task_stopped(wo, ptrace, p); | |
1626 | + if (ret) | |
1627 | + return ret; | |
1558 | 1628 | |
1559 | 1629 | /* |
1560 | - * It's stopped or running now, so it might | |
1561 | - * later continue, exit, or stop again. | |
1630 | + * Wait for continued. There's only one continued state and the | |
1631 | + * ptracer can consume it which can confuse the real parent. Don't | |
1632 | + * use WCONTINUED from ptracer. You don't need or want it. | |
1562 | 1633 | */ |
1563 | - wo->notask_error = 0; | |
1564 | - | |
1565 | - if (task_stopped_code(p, ptrace)) | |
1566 | - return wait_task_stopped(wo, ptrace, p); | |
1567 | - | |
1568 | 1634 | return wait_task_continued(wo, p); |
1569 | 1635 | } |
1570 | 1636 |
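
The reorganized wait_consider_task() above is the kernel side of waitpid() with WUNTRACED and WCONTINUED. A runnable sketch of the three wait conditions it dispatches to, stopped, continued, and zombie (hypothetical demo, not from the patch):

    #include <signal.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        int status;
        pid_t pid = fork();

        if (pid == 0)                       /* child: sleep until killed */
            for (;;)
                pause();

        kill(pid, SIGSTOP);                 /* -> wait_task_stopped() */
        waitpid(pid, &status, WUNTRACED);
        if (WIFSTOPPED(status))
            printf("stopped by signal %d\n", WSTOPSIG(status));

        kill(pid, SIGCONT);                 /* -> wait_task_continued() */
        waitpid(pid, &status, WCONTINUED);
        if (WIFCONTINUED(status))
            printf("continued\n");

        kill(pid, SIGKILL);                 /* -> wait_task_zombie() */
        waitpid(pid, &status, 0);
        return 0;
    }
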
kernel/ptrace.c
... | ... | @@ -38,35 +38,33 @@ |
38 | 38 | child->parent = new_parent; |
39 | 39 | } |
40 | 40 | |
41 | -/* | |
42 | - * Turn a tracing stop into a normal stop now, since with no tracer there | |
43 | - * would be no way to wake it up with SIGCONT or SIGKILL. If there was a | |
44 | - * signal sent that would resume the child, but didn't because it was in | |
45 | - * TASK_TRACED, resume it now. | |
46 | - * Requires that irqs be disabled. | |
47 | - */ | |
48 | -static void ptrace_untrace(struct task_struct *child) | |
49 | -{ | |
50 | - spin_lock(&child->sighand->siglock); | |
51 | - if (task_is_traced(child)) { | |
52 | - /* | |
53 | - * If the group stop is completed or in progress, | |
54 | - * this thread was already counted as stopped. | |
55 | - */ | |
56 | - if (child->signal->flags & SIGNAL_STOP_STOPPED || | |
57 | - child->signal->group_stop_count) | |
58 | - __set_task_state(child, TASK_STOPPED); | |
59 | - else | |
60 | - signal_wake_up(child, 1); | |
61 | - } | |
62 | - spin_unlock(&child->sighand->siglock); | |
63 | -} | |
64 | - | |
65 | -/* | |
66 | - * unptrace a task: move it back to its original parent and | |
67 | - * remove it from the ptrace list. | |
41 | +/** | |
42 | + * __ptrace_unlink - unlink ptracee and restore its execution state | |
43 | + * @child: ptracee to be unlinked | |
68 | 44 | * |
69 | - * Must be called with the tasklist lock write-held. | |
45 | + * Remove @child from the ptrace list, move it back to the original parent, | |
46 | + * and restore the execution state so that it conforms to the group stop | |
47 | + * state. | |
48 | + * | |
49 | + * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer | |
50 | + * exiting. For PTRACE_DETACH, unless the ptracee has been killed between | |
51 | + * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED. | |
52 | + * If the ptracer is exiting, the ptracee can be in any state. | |
53 | + * | |
54 | + * After detach, the ptracee should be in a state which conforms to the | |
55 | + * group stop. If the group is stopped or in the process of stopping, the | |
56 | + * ptracee should be put into TASK_STOPPED; otherwise, it should be woken | |
57 | + * up from TASK_TRACED. | |
58 | + * | |
59 | + * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED, | |
60 | + * it goes through TRACED -> RUNNING -> STOPPED transition which is similar | |
61 | + * to but in the opposite direction of what happens while attaching to a | |
62 | + * stopped task. However, in this direction, the intermediate RUNNING | |
63 | + * state is not hidden even from the current ptracer and if it immediately | |
64 | + * re-attaches and performs a WNOHANG wait(2), it may fail. | |
65 | + * | |
66 | + * CONTEXT: | |
67 | + * write_lock_irq(tasklist_lock) | |
70 | 68 | */ |
71 | 69 | void __ptrace_unlink(struct task_struct *child) |
72 | 70 | { |
... | ... | @@ -76,8 +74,27 @@ |
76 | 74 | child->parent = child->real_parent; |
77 | 75 | list_del_init(&child->ptrace_entry); |
78 | 76 | |
79 | - if (task_is_traced(child)) | |
80 | - ptrace_untrace(child); | |
77 | + spin_lock(&child->sighand->siglock); | |
78 | + | |
79 | + /* | |
80 | + * Reinstate GROUP_STOP_PENDING if group stop is in effect and | |
81 | + * @child isn't dead. | |
82 | + */ | |
83 | + if (!(child->flags & PF_EXITING) && | |
84 | + (child->signal->flags & SIGNAL_STOP_STOPPED || | |
85 | + child->signal->group_stop_count)) | |
86 | + child->group_stop |= GROUP_STOP_PENDING; | |
87 | + | |
88 | + /* | |
89 | + * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick | |
90 | + * @child in the butt. Note that @resume should be used iff @child | |
91 | + * is in TASK_TRACED; otherwise, we might unduly disrupt | |
92 | + * TASK_KILLABLE sleeps. | |
93 | + */ | |
94 | + if (child->group_stop & GROUP_STOP_PENDING || task_is_traced(child)) | |
95 | + signal_wake_up(child, task_is_traced(child)); | |
96 | + | |
97 | + spin_unlock(&child->sighand->siglock); | |
81 | 98 | } |
82 | 99 | |
83 | 100 | /* |
84 | 101 | |
... | ... | @@ -96,16 +113,14 @@ |
96 | 113 | */ |
97 | 114 | read_lock(&tasklist_lock); |
98 | 115 | if ((child->ptrace & PT_PTRACED) && child->parent == current) { |
99 | - ret = 0; | |
100 | 116 | /* |
101 | 117 | * child->sighand can't be NULL, release_task() |
102 | 118 | * does ptrace_unlink() before __exit_signal(). |
103 | 119 | */ |
104 | 120 | spin_lock_irq(&child->sighand->siglock); |
105 | - if (task_is_stopped(child)) | |
106 | - child->state = TASK_TRACED; | |
107 | - else if (!task_is_traced(child) && !kill) | |
108 | - ret = -ESRCH; | |
121 | + WARN_ON_ONCE(task_is_stopped(child)); | |
122 | + if (task_is_traced(child) || kill) | |
123 | + ret = 0; | |
109 | 124 | spin_unlock_irq(&child->sighand->siglock); |
110 | 125 | } |
111 | 126 | read_unlock(&tasklist_lock); |
... | ... | @@ -169,6 +184,7 @@ |
169 | 184 | |
170 | 185 | static int ptrace_attach(struct task_struct *task) |
171 | 186 | { |
187 | + bool wait_trap = false; | |
172 | 188 | int retval; |
173 | 189 | |
174 | 190 | audit_ptrace(task); |
175 | 191 | |
... | ... | @@ -208,12 +224,42 @@ |
208 | 224 | __ptrace_link(task, current); |
209 | 225 | send_sig_info(SIGSTOP, SEND_SIG_FORCED, task); |
210 | 226 | |
227 | + spin_lock(&task->sighand->siglock); | |
228 | + | |
229 | + /* | |
230 | + * If the task is already STOPPED, set GROUP_STOP_PENDING and | |
231 | + * TRAPPING, and kick it so that it transits to TRACED. TRAPPING | |
232 | + * will be cleared if the child completes the transition or any | |
233 | + * event which clears the group stop states happens. We'll wait | |
234 | + * for the transition to complete before returning from this | |
235 | + * function. | |
236 | + * | |
237 | + * This hides STOPPED -> RUNNING -> TRACED transition from the | |
238 | + * attaching thread but a different thread in the same group can | |
239 | + * still observe the transient RUNNING state. IOW, if another | |
240 | + * thread's WNOHANG wait(2) on the stopped tracee races against | |
241 | + * ATTACH, the wait(2) may fail due to the transient RUNNING. | |
242 | + * | |
243 | + * The following task_is_stopped() test is safe as both transitions | |
244 | + * in and out of STOPPED are protected by siglock. | |
245 | + */ | |
246 | + if (task_is_stopped(task)) { | |
247 | + task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING; | |
248 | + signal_wake_up(task, 1); | |
249 | + wait_trap = true; | |
250 | + } | |
251 | + | |
252 | + spin_unlock(&task->sighand->siglock); | |
253 | + | |
211 | 254 | retval = 0; |
212 | 255 | unlock_tasklist: |
213 | 256 | write_unlock_irq(&tasklist_lock); |
214 | 257 | unlock_creds: |
215 | 258 | mutex_unlock(&task->signal->cred_guard_mutex); |
216 | 259 | out: |
260 | + if (wait_trap) | |
261 | + wait_event(current->signal->wait_chldexit, | |
262 | + !(task->group_stop & GROUP_STOP_TRAPPING)); | |
217 | 263 | return retval; |
218 | 264 | } |
219 | 265 | |
... | ... | @@ -316,8 +362,6 @@ |
316 | 362 | if (child->ptrace) { |
317 | 363 | child->exit_code = data; |
318 | 364 | dead = __ptrace_detach(current, child); |
319 | - if (!child->exit_state) | |
320 | - wake_up_state(child, TASK_TRACED | TASK_STOPPED); | |
321 | 365 | } |
322 | 366 | write_unlock_irq(&tasklist_lock); |
323 | 367 |
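
ptrace_attach() above now kicks an already group-stopped tracee from TASK_STOPPED into TASK_TRACED and waits for GROUP_STOP_TRAPPING to clear before returning. A runnable userspace sketch of that attach-to-stopped scenario (illustrative; error handling omitted):

    #include <signal.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        int status;
        pid_t pid = fork();

        if (pid == 0)                        /* child: sleep until killed */
            for (;;)
                pause();

        kill(pid, SIGSTOP);                  /* put the child in group stop */
        waitpid(pid, &status, WUNTRACED);

        /* Attach to the stopped child; the kernel moves it from STOPPED
         * to TRACED, and the wait below reliably sees the ptrace stop. */
        if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == 0) {
            waitpid(pid, &status, 0);
            printf("traced, stop signal %d\n", WSTOPSIG(status));
            ptrace(PTRACE_DETACH, pid, NULL, NULL);
        }

        kill(pid, SIGKILL);
        waitpid(pid, &status, 0);
        return 0;
    }
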
kernel/signal.c
... | ... | @@ -124,7 +124,7 @@ |
124 | 124 | |
125 | 125 | static int recalc_sigpending_tsk(struct task_struct *t) |
126 | 126 | { |
127 | - if (t->signal->group_stop_count > 0 || | |
127 | + if ((t->group_stop & GROUP_STOP_PENDING) || | |
128 | 128 | PENDING(&t->pending, &t->blocked) || |
129 | 129 | PENDING(&t->signal->shared_pending, &t->blocked)) { |
130 | 130 | set_tsk_thread_flag(t, TIF_SIGPENDING); |
... | ... | @@ -223,6 +223,83 @@ |
223 | 223 | current->comm, current->pid, sig); |
224 | 224 | } |
225 | 225 | |
226 | +/** | |
227 | + * task_clear_group_stop_trapping - clear group stop trapping bit | |
228 | + * @task: target task | |
229 | + * | |
230 | + * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us. Clear it | |
231 | + * and wake up the ptracer. Note that we don't need any further locking. | |
232 | + * @task->siglock guarantees that @task->parent points to the ptracer. | |
233 | + * | |
234 | + * CONTEXT: | |
235 | + * Must be called with @task->sighand->siglock held. | |
236 | + */ | |
237 | +static void task_clear_group_stop_trapping(struct task_struct *task) | |
238 | +{ | |
239 | + if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) { | |
240 | + task->group_stop &= ~GROUP_STOP_TRAPPING; | |
241 | + __wake_up_sync_key(&task->parent->signal->wait_chldexit, | |
242 | + TASK_UNINTERRUPTIBLE, 1, task); | |
243 | + } | |
244 | +} | |
245 | + | |
246 | +/** | |
247 | + * task_clear_group_stop_pending - clear pending group stop | |
248 | + * @task: target task | |
249 | + * | |
250 | + * Clear group stop states for @task. | |
251 | + * | |
252 | + * CONTEXT: | |
253 | + * Must be called with @task->sighand->siglock held. | |
254 | + */ | |
255 | +void task_clear_group_stop_pending(struct task_struct *task) | |
256 | +{ | |
257 | + task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME | | |
258 | + GROUP_STOP_DEQUEUED); | |
259 | +} | |
260 | + | |
261 | +/** | |
262 | + * task_participate_group_stop - participate in a group stop | |
263 | + * @task: task participating in a group stop | |
264 | + * | |
265 | + * @task has GROUP_STOP_PENDING set and is participating in a group stop. | |
266 | + * Group stop states are cleared and the group stop count is consumed if | |
267 | + * %GROUP_STOP_CONSUME was set. If the consumption completes the group | |
268 | + * stop, the appropriate %SIGNAL_* flags are set. | |
269 | + * | |
270 | + * CONTEXT: | |
271 | + * Must be called with @task->sighand->siglock held. | |
272 | + * | |
273 | + * RETURNS: | |
274 | + * %true if group stop completion should be notified to the parent, %false | |
275 | + * otherwise. | |
276 | + */ | |
277 | +static bool task_participate_group_stop(struct task_struct *task) | |
278 | +{ | |
279 | + struct signal_struct *sig = task->signal; | |
280 | + bool consume = task->group_stop & GROUP_STOP_CONSUME; | |
281 | + | |
282 | + WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING)); | |
283 | + | |
284 | + task_clear_group_stop_pending(task); | |
285 | + | |
286 | + if (!consume) | |
287 | + return false; | |
288 | + | |
289 | + if (!WARN_ON_ONCE(sig->group_stop_count == 0)) | |
290 | + sig->group_stop_count--; | |
291 | + | |
292 | + /* | |
293 | + * Tell the caller to notify completion iff we are entering into a | |
294 | + * fresh group stop. Read comment in do_signal_stop() for details. | |
295 | + */ | |
296 | + if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) { | |
297 | + sig->flags = SIGNAL_STOP_STOPPED; | |
298 | + return true; | |
299 | + } | |
300 | + return false; | |
301 | +} | |
302 | + | |
226 | 303 | /* |
227 | 304 | * allocate a new signal queue record |
228 | 305 | * - this may be called without locks if and only if t == current, otherwise an |
... | ... | @@ -527,7 +604,7 @@ |
527 | 604 | * is to alert stop-signal processing code when another |
528 | 605 | * processor has come along and cleared the flag. |
529 | 606 | */ |
530 | - tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; | |
607 | + current->group_stop |= GROUP_STOP_DEQUEUED; | |
531 | 608 | } |
532 | 609 | if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { |
533 | 610 | /* |
... | ... | @@ -592,7 +669,7 @@ |
592 | 669 | if (sigisemptyset(&m)) |
593 | 670 | return 0; |
594 | 671 | |
595 | - signandsets(&s->signal, &s->signal, mask); | |
672 | + sigandnsets(&s->signal, &s->signal, mask); | |
596 | 673 | list_for_each_entry_safe(q, n, &s->list, list) { |
597 | 674 | if (sigismember(mask, q->info.si_signo)) { |
598 | 675 | list_del_init(&q->list); |
599 | 676 | |
600 | 677 | |
... | ... | @@ -727,34 +804,14 @@ |
727 | 804 | } else if (sig == SIGCONT) { |
728 | 805 | unsigned int why; |
729 | 806 | /* |
730 | - * Remove all stop signals from all queues, | |
731 | - * and wake all threads. | |
807 | + * Remove all stop signals from all queues, wake all threads. | |
732 | 808 | */ |
733 | 809 | rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending); |
734 | 810 | t = p; |
735 | 811 | do { |
736 | - unsigned int state; | |
812 | + task_clear_group_stop_pending(t); | |
737 | 813 | rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); |
738 | - /* | |
739 | - * If there is a handler for SIGCONT, we must make | |
740 | - * sure that no thread returns to user mode before | |
741 | - * we post the signal, in case it was the only | |
742 | - * thread eligible to run the signal handler--then | |
743 | - * it must not do anything between resuming and | |
744 | - * running the handler. With the TIF_SIGPENDING | |
745 | - * flag set, the thread will pause and acquire the | |
746 | - * siglock that we hold now and until we've queued | |
747 | - * the pending signal. | |
748 | - * | |
749 | - * Wake up the stopped thread _after_ setting | |
750 | - * TIF_SIGPENDING | |
751 | - */ | |
752 | - state = __TASK_STOPPED; | |
753 | - if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) { | |
754 | - set_tsk_thread_flag(t, TIF_SIGPENDING); | |
755 | - state |= TASK_INTERRUPTIBLE; | |
756 | - } | |
757 | - wake_up_state(t, state); | |
814 | + wake_up_state(t, __TASK_STOPPED); | |
758 | 815 | } while_each_thread(p, t); |
759 | 816 | |
760 | 817 | /* |
... | ... | @@ -780,13 +837,6 @@ |
780 | 837 | signal->flags = why | SIGNAL_STOP_CONTINUED; |
781 | 838 | signal->group_stop_count = 0; |
782 | 839 | signal->group_exit_code = 0; |
783 | - } else { | |
784 | - /* | |
785 | - * We are not stopped, but there could be a stop | |
786 | - * signal in the middle of being processed after | |
787 | - * being removed from the queue. Clear that too. | |
788 | - */ | |
789 | - signal->flags &= ~SIGNAL_STOP_DEQUEUED; | |
790 | 840 | } |
791 | 841 | } |
792 | 842 | |
... | ... | @@ -875,6 +925,7 @@ |
875 | 925 | signal->group_stop_count = 0; |
876 | 926 | t = p; |
877 | 927 | do { |
928 | + task_clear_group_stop_pending(t); | |
878 | 929 | sigaddset(&t->pending.signal, SIGKILL); |
879 | 930 | signal_wake_up(t, 1); |
880 | 931 | } while_each_thread(p, t); |
... | ... | @@ -1109,6 +1160,7 @@ |
1109 | 1160 | p->signal->group_stop_count = 0; |
1110 | 1161 | |
1111 | 1162 | while_each_thread(p, t) { |
1163 | + task_clear_group_stop_pending(t); | |
1112 | 1164 | count++; |
1113 | 1165 | |
1114 | 1166 | /* Don't bother with already dead threads */ |
1115 | 1167 | |
1116 | 1168 | |
... | ... | @@ -1536,16 +1588,30 @@ |
1536 | 1588 | return ret; |
1537 | 1589 | } |
1538 | 1590 | |
1539 | -static void do_notify_parent_cldstop(struct task_struct *tsk, int why) | |
1591 | +/** | |
1592 | + * do_notify_parent_cldstop - notify parent of stopped/continued state change | |
1593 | + * @tsk: task reporting the state change | |
1594 | + * @for_ptracer: the notification is for ptracer | |
1595 | + * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report | |
1596 | + * | |
1597 | + * Notify @tsk's parent that the stopped/continued state has changed. If | |
1598 | + * @for_ptracer is %false, @tsk's group leader notifies its real parent. | 
1599 | + * If %true, @tsk reports to @tsk->parent which should be the ptracer. | |
1600 | + * | |
1601 | + * CONTEXT: | |
1602 | + * Must be called with tasklist_lock at least read locked. | |
1603 | + */ | |
1604 | +static void do_notify_parent_cldstop(struct task_struct *tsk, | |
1605 | + bool for_ptracer, int why) | |
1540 | 1606 | { |
1541 | 1607 | struct siginfo info; |
1542 | 1608 | unsigned long flags; |
1543 | 1609 | struct task_struct *parent; |
1544 | 1610 | struct sighand_struct *sighand; |
1545 | 1611 | |
1546 | - if (task_ptrace(tsk)) | |
1612 | + if (for_ptracer) { | |
1547 | 1613 | parent = tsk->parent; |
1548 | - else { | |
1614 | + } else { | |
1549 | 1615 | tsk = tsk->group_leader; |
1550 | 1616 | parent = tsk->real_parent; |
1551 | 1617 | } |
... | ... | @@ -1621,6 +1687,15 @@ |
1621 | 1687 | } |
1622 | 1688 | |
1623 | 1689 | /* |
1690 | + * Test whether the target task of the usual cldstop notification - the | |
1691 | + * real_parent of @child - is in the same group as the ptracer. | |
1692 | + */ | |
1693 | +static bool real_parent_is_ptracer(struct task_struct *child) | |
1694 | +{ | |
1695 | + return same_thread_group(child->parent, child->real_parent); | |
1696 | +} | |
1697 | + | |
1698 | +/* | |
1624 | 1699 | * This must be called with current->sighand->siglock held. |
1625 | 1700 | * |
1626 | 1701 | * This should be the path for all ptrace stops. |
1627 | 1702 | |
... | ... | @@ -1631,10 +1706,12 @@ |
1631 | 1706 | * If we actually decide not to stop at all because the tracer |
1632 | 1707 | * is gone, we keep current->exit_code unless clear_code. |
1633 | 1708 | */ |
1634 | -static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) | |
1709 | +static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) | |
1635 | 1710 | __releases(&current->sighand->siglock) |
1636 | 1711 | __acquires(&current->sighand->siglock) |
1637 | 1712 | { |
1713 | + bool gstop_done = false; | |
1714 | + | |
1638 | 1715 | if (arch_ptrace_stop_needed(exit_code, info)) { |
1639 | 1716 | /* |
1640 | 1717 | * The arch code has something special to do before a |
... | ... | @@ -1655,22 +1732,50 @@ |
1655 | 1732 | } |
1656 | 1733 | |
1657 | 1734 | /* |
1658 | - * If there is a group stop in progress, | |
1659 | - * we must participate in the bookkeeping. | |
1735 | + * If @why is CLD_STOPPED, we're trapping to participate in a group | |
1736 | + * stop. Do the bookkeeping. Note that if SIGCONT was delivered | 
1737 | + * while siglock was released for the arch hook, PENDING could be | |
1738 | + * clear now. We act as if SIGCONT is received after TASK_TRACED | |
1739 | + * is entered - ignore it. | |
1660 | 1740 | */ |
1661 | - if (current->signal->group_stop_count > 0) | |
1662 | - --current->signal->group_stop_count; | |
1741 | + if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING)) | |
1742 | + gstop_done = task_participate_group_stop(current); | |
1663 | 1743 | |
1664 | 1744 | current->last_siginfo = info; |
1665 | 1745 | current->exit_code = exit_code; |
1666 | 1746 | |
1667 | - /* Let the debugger run. */ | |
1668 | - __set_current_state(TASK_TRACED); | |
1747 | + /* | |
1748 | + * TRACED should be visible before TRAPPING is cleared; otherwise, | |
1749 | + * the tracer might fail do_wait(). | |
1750 | + */ | |
1751 | + set_current_state(TASK_TRACED); | |
1752 | + | |
1753 | + /* | |
1754 | + * We're committing to trapping. Clearing GROUP_STOP_TRAPPING and | |
1755 | + * transition to TASK_TRACED should be atomic with respect to | |
1756 | + * siglock. This should be done after the arch hook as siglock is | 
1757 | + * released and regrabbed across it. | |
1758 | + */ | |
1759 | + task_clear_group_stop_trapping(current); | |
1760 | + | |
1669 | 1761 | spin_unlock_irq(&current->sighand->siglock); |
1670 | 1762 | read_lock(&tasklist_lock); |
1671 | 1763 | if (may_ptrace_stop()) { |
1672 | - do_notify_parent_cldstop(current, CLD_TRAPPED); | |
1673 | 1764 | /* |
1765 | + * Notify parents of the stop. | |
1766 | + * | |
1767 | + * While ptraced, there are two parents - the ptracer and | |
1768 | + * the real_parent of the group_leader. The ptracer should | |
1769 | + * know about every stop while the real parent is only | |
1770 | + * interested in the completion of group stop. The states | |
1771 | + * for the two don't interact with each other. Notify | |
1772 | + * separately unless they're gonna be duplicates. | |
1773 | + */ | |
1774 | + do_notify_parent_cldstop(current, true, why); | |
1775 | + if (gstop_done && !real_parent_is_ptracer(current)) | |
1776 | + do_notify_parent_cldstop(current, false, why); | |
1777 | + | |
1778 | + /* | |
1674 | 1779 | * Don't want to allow preemption here, because |
1675 | 1780 | * sys_ptrace() needs this task to be inactive. |
1676 | 1781 | * |
1677 | 1782 | |
... | ... | @@ -1684,7 +1789,16 @@ |
1684 | 1789 | /* |
1685 | 1790 | * By the time we got the lock, our tracer went away. |
1686 | 1791 | * Don't drop the lock yet, another tracer may come. |
1792 | + * | |
1793 | + * If @gstop_done, the ptracer went away between group stop | |
1794 | + * completion and here. During detach, it would have set | |
1795 | + * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED | |
1796 | + * in do_signal_stop() on return, so notifying the real | |
1797 | + * parent of the group stop completion is enough. | |
1687 | 1798 | */ |
1799 | + if (gstop_done) | |
1800 | + do_notify_parent_cldstop(current, false, why); | |
1801 | + | |
1688 | 1802 | __set_current_state(TASK_RUNNING); |
1689 | 1803 | if (clear_code) |
1690 | 1804 | current->exit_code = 0; |
... | ... | @@ -1728,7 +1842,7 @@ |
1728 | 1842 | |
1729 | 1843 | /* Let the debugger run. */ |
1730 | 1844 | spin_lock_irq(&current->sighand->siglock); |
1731 | - ptrace_stop(exit_code, 1, &info); | |
1845 | + ptrace_stop(exit_code, CLD_TRAPPED, 1, &info); | |
1732 | 1846 | spin_unlock_irq(&current->sighand->siglock); |
1733 | 1847 | } |
... | ... | @@ -1741,66 +1855,115 @@ |
1741 | 1855 | static int do_signal_stop(int signr) |
1742 | 1856 | { |
1743 | 1857 | struct signal_struct *sig = current->signal; |
1744 | - int notify; | |
1745 | 1858 | |
1746 | - if (!sig->group_stop_count) { | |
1859 | + if (!(current->group_stop & GROUP_STOP_PENDING)) { | |
1860 | + unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME; | |
1747 | 1861 | struct task_struct *t; |
1748 | 1862 | |
1749 | - if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) || | |
1863 | + /* signr will be recorded in task->group_stop for retries */ | |
1864 | + WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK); | |
1865 | + | |
1866 | + if (!likely(current->group_stop & GROUP_STOP_DEQUEUED) || | |
1750 | 1867 | unlikely(signal_group_exit(sig))) |
1751 | 1868 | return 0; |
1752 | 1869 | /* |
1753 | - * There is no group stop already in progress. | |
1754 | - * We must initiate one now. | |
1870 | + * There is no group stop already in progress. We must | |
1871 | + * initiate one now. | |
1872 | + * | |
1873 | + * While ptraced, a task may be resumed while group stop is | |
1874 | + * still in effect and then receive a stop signal and | |
1875 | + * initiate another group stop. This deviates from the | |
1876 | + * usual behavior as two consecutive stop signals can't | |
1877 | + * cause two group stops when !ptraced. That is why we | |
1878 | + * also check !task_is_stopped(t) below. | |
1879 | + * | |
1880 | + * The condition can be distinguished by testing whether | |
1881 | + * SIGNAL_STOP_STOPPED is already set. Don't generate | |
1882 | + * group_exit_code in such case. | |
1883 | + * | |
1884 | + * This is not necessary for SIGNAL_STOP_CONTINUED because | |
1885 | + * an intervening stop signal is required to cause two | |
1886 | + * continued events regardless of ptrace. | |
1755 | 1887 | */ |
1756 | - sig->group_exit_code = signr; | |
1888 | + if (!(sig->flags & SIGNAL_STOP_STOPPED)) | |
1889 | + sig->group_exit_code = signr; | |
1890 | + else | |
1891 | + WARN_ON_ONCE(!task_ptrace(current)); | |
1757 | 1892 | |
1893 | + current->group_stop &= ~GROUP_STOP_SIGMASK; | |
1894 | + current->group_stop |= signr | gstop; | |
1758 | 1895 | sig->group_stop_count = 1; |
1759 | - for (t = next_thread(current); t != current; t = next_thread(t)) | |
1896 | + for (t = next_thread(current); t != current; | |
1897 | + t = next_thread(t)) { | |
1898 | + t->group_stop &= ~GROUP_STOP_SIGMASK; | |
1760 | 1899 | /* |
1761 | 1900 | * Setting state to TASK_STOPPED for a group |
1762 | 1901 | * stop is always done with the siglock held, |
1763 | 1902 | * so this check has no races. |
1764 | 1903 | */ |
1765 | - if (!(t->flags & PF_EXITING) && | |
1766 | - !task_is_stopped_or_traced(t)) { | |
1904 | + if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) { | |
1905 | + t->group_stop |= signr | gstop; | |
1767 | 1906 | sig->group_stop_count++; |
1768 | 1907 | signal_wake_up(t, 0); |
1769 | 1908 | } |
1909 | + } | |
1770 | 1910 | } |
1771 | - /* | |
1772 | - * If there are no other threads in the group, or if there is | |
1773 | - * a group stop in progress and we are the last to stop, report | |
1774 | - * to the parent. When ptraced, every thread reports itself. | |
1775 | - */ | |
1776 | - notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0; | |
1777 | - notify = tracehook_notify_jctl(notify, CLD_STOPPED); | |
1778 | - /* | |
1779 | - * tracehook_notify_jctl() can drop and reacquire siglock, so | |
1780 | - * we keep ->group_stop_count != 0 before the call. If SIGCONT | |
1781 | - * or SIGKILL comes in between ->group_stop_count == 0. | |
1782 | - */ | |
1783 | - if (sig->group_stop_count) { | |
1784 | - if (!--sig->group_stop_count) | |
1785 | - sig->flags = SIGNAL_STOP_STOPPED; | |
1786 | - current->exit_code = sig->group_exit_code; | |
1911 | +retry: | |
1912 | + if (likely(!task_ptrace(current))) { | |
1913 | + int notify = 0; | |
1914 | + | |
1915 | + /* | |
1916 | + * If there are no other threads in the group, or if there | |
1917 | + * is a group stop in progress and we are the last to stop, | |
1918 | + * report to the parent. | |
1919 | + */ | |
1920 | + if (task_participate_group_stop(current)) | |
1921 | + notify = CLD_STOPPED; | |
1922 | + | |
1787 | 1923 | __set_current_state(TASK_STOPPED); |
1924 | + spin_unlock_irq(¤t->sighand->siglock); | |
1925 | + | |
1926 | + /* | |
1927 | + * Notify the parent of the group stop completion. Because | |
1928 | + * we're not holding either the siglock or tasklist_lock | |
1929 | + * here, ptracer may attach in between; however, this is for | 
1930 | + * group stop and should always be delivered to the real | |
1931 | + * parent of the group leader. The new ptracer will get | |
1932 | + * its notification when this task transitions into | |
1933 | + * TASK_TRACED. | |
1934 | + */ | |
1935 | + if (notify) { | |
1936 | + read_lock(&tasklist_lock); | |
1937 | + do_notify_parent_cldstop(current, false, notify); | |
1938 | + read_unlock(&tasklist_lock); | |
1939 | + } | |
1940 | + | |
1941 | + /* Now we don't run again until woken by SIGCONT or SIGKILL */ | |
1942 | + schedule(); | |
1943 | + | |
1944 | + spin_lock_irq(&current->sighand->siglock); | 
1945 | + } else { | |
1946 | + ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK, | |
1947 | + CLD_STOPPED, 0, NULL); | |
1948 | + current->exit_code = 0; | |
1788 | 1949 | } |
1789 | - spin_unlock_irq(&current->sighand->siglock); | 
1790 | 1950 | |
1791 | - if (notify) { | |
1792 | - read_lock(&tasklist_lock); | |
1793 | - do_notify_parent_cldstop(current, notify); | |
1794 | - read_unlock(&tasklist_lock); | |
1951 | + /* | |
1952 | + * GROUP_STOP_PENDING could be set if another group stop has | |
1953 | + * started since being woken up or ptrace wants us to transit | |
1954 | + * between TASK_STOPPED and TRACED. Retry group stop. | |
1955 | + */ | |
1956 | + if (current->group_stop & GROUP_STOP_PENDING) { | |
1957 | + WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK)); | |
1958 | + goto retry; | |
1795 | 1959 | } |
1796 | 1960 | |
1797 | - /* Now we don't run again until woken by SIGCONT or SIGKILL */ | |
1798 | - do { | |
1799 | - schedule(); | |
1800 | - } while (try_to_freeze()); | |
1961 | + /* PTRACE_ATTACH might have raced with task killing, clear trapping */ | |
1962 | + task_clear_group_stop_trapping(current); | |
1801 | 1963 | |
1964 | + spin_unlock_irq(&current->sighand->siglock); | 
1965 | + | |
1802 | 1966 | tracehook_finish_jctl(); |
1803 | - current->exit_code = 0; | |
1804 | 1967 | |
1805 | 1968 | return 1; |
1806 | 1969 | } |
... | ... | @@ -1814,7 +1977,7 @@ |
1814 | 1977 | ptrace_signal_deliver(regs, cookie); |
1815 | 1978 | |
1816 | 1979 | /* Let the debugger run. */ |
1817 | - ptrace_stop(signr, 0, info); | |
1980 | + ptrace_stop(signr, CLD_TRAPPED, 0, info); | |
1818 | 1981 | |
1819 | 1982 | /* We're back. Did the debugger cancel the sig? */ |
1820 | 1983 | signr = current->exit_code; |
1821 | 1984 | |
1822 | 1985 | |
... | ... | @@ -1869,18 +2032,36 @@ |
1869 | 2032 | * the CLD_ si_code into SIGNAL_CLD_MASK bits. |
1870 | 2033 | */ |
1871 | 2034 | if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { |
1872 | - int why = (signal->flags & SIGNAL_STOP_CONTINUED) | |
1873 | - ? CLD_CONTINUED : CLD_STOPPED; | |
2035 | + struct task_struct *leader; | |
2036 | + int why; | |
2037 | + | |
2038 | + if (signal->flags & SIGNAL_CLD_CONTINUED) | |
2039 | + why = CLD_CONTINUED; | |
2040 | + else | |
2041 | + why = CLD_STOPPED; | |
2042 | + | |
1874 | 2043 | signal->flags &= ~SIGNAL_CLD_MASK; |
1875 | 2044 | |
1876 | - why = tracehook_notify_jctl(why, CLD_CONTINUED); | |
1877 | 2045 | spin_unlock_irq(&sighand->siglock); |
1878 | 2046 | |
1879 | - if (why) { | |
1880 | - read_lock(&tasklist_lock); | |
1881 | - do_notify_parent_cldstop(current->group_leader, why); | |
1882 | - read_unlock(&tasklist_lock); | |
1883 | - } | |
2047 | + /* | |
2048 | + * Notify the parent that we're continuing. This event is | |
2049 | + * always per-process and doesn't make whole lot of sense | |
2050 | + * for ptracers, who shouldn't consume the state via | |
2051 | + * wait(2) either, but, for backward compatibility, notify | |
2052 | + * the ptracer of the group leader too unless it's gonna be | |
2053 | + * a duplicate. | |
2054 | + */ | |
2055 | + read_lock(&tasklist_lock); | |
2056 | + | |
2057 | + do_notify_parent_cldstop(current, false, why); | |
2058 | + | |
2059 | + leader = current->group_leader; | |
2060 | + if (task_ptrace(leader) && !real_parent_is_ptracer(leader)) | |
2061 | + do_notify_parent_cldstop(leader, true, why); | |
2062 | + | |
2063 | + read_unlock(&tasklist_lock); | |
2064 | + | |
1884 | 2065 | goto relock; |
1885 | 2066 | } |
1886 | 2067 | |
... | ... | @@ -1897,8 +2078,8 @@ |
1897 | 2078 | if (unlikely(signr != 0)) |
1898 | 2079 | ka = return_ka; |
1899 | 2080 | else { |
1900 | - if (unlikely(signal->group_stop_count > 0) && | |
1901 | - do_signal_stop(0)) | |
2081 | + if (unlikely(current->group_stop & | |
2082 | + GROUP_STOP_PENDING) && do_signal_stop(0)) | |
1902 | 2083 | goto relock; |
1903 | 2084 | |
1904 | 2085 | signr = dequeue_signal(current, ¤t->blocked, |
1905 | 2086 | |
... | ... | @@ -2017,10 +2198,42 @@ |
2017 | 2198 | return signr; |
2018 | 2199 | } |
2019 | 2200 | |
2201 | +/* | |
2202 | + * It could be that complete_signal() picked us to notify about the | |
2203 | + * group-wide signal. Other threads should be notified now to take | |
2204 | + * the shared signals in @which since we will not. | |
2205 | + */ | |
2206 | +static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) | |
2207 | +{ | |
2208 | + sigset_t retarget; | |
2209 | + struct task_struct *t; | |
2210 | + | |
2211 | + sigandsets(&retarget, &tsk->signal->shared_pending.signal, which); | |
2212 | + if (sigisemptyset(&retarget)) | |
2213 | + return; | |
2214 | + | |
2215 | + t = tsk; | |
2216 | + while_each_thread(tsk, t) { | |
2217 | + if (t->flags & PF_EXITING) | |
2218 | + continue; | |
2219 | + | |
2220 | + if (!has_pending_signals(&retarget, &t->blocked)) | |
2221 | + continue; | |
2222 | + /* Remove the signals this thread can handle. */ | |
2223 | + sigandsets(&retarget, &retarget, &t->blocked); | |
2224 | + | |
2225 | + if (!signal_pending(t)) | |
2226 | + signal_wake_up(t, 0); | |
2227 | + | |
2228 | + if (sigisemptyset(&retarget)) | |
2229 | + break; | |
2230 | + } | |
2231 | +} | |
2232 | + | |
2020 | 2233 | void exit_signals(struct task_struct *tsk) |
2021 | 2234 | { |
2022 | 2235 | int group_stop = 0; |
2023 | - struct task_struct *t; | |
2236 | + sigset_t unblocked; | |
2024 | 2237 | |
2025 | 2238 | if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { |
2026 | 2239 | tsk->flags |= PF_EXITING; |
... | ... | @@ -2036,26 +2249,23 @@ |
2036 | 2249 | if (!signal_pending(tsk)) |
2037 | 2250 | goto out; |
2038 | 2251 | |
2039 | - /* | |
2040 | - * It could be that __group_complete_signal() choose us to | |
2041 | - * notify about group-wide signal. Another thread should be | |
2042 | - * woken now to take the signal since we will not. | |
2043 | - */ | |
2044 | - for (t = tsk; (t = next_thread(t)) != tsk; ) | |
2045 | - if (!signal_pending(t) && !(t->flags & PF_EXITING)) | |
2046 | - recalc_sigpending_and_wake(t); | |
2252 | + unblocked = tsk->blocked; | |
2253 | + signotset(&unblocked); | |
2254 | + retarget_shared_pending(tsk, &unblocked); | |
2047 | 2255 | |
2048 | - if (unlikely(tsk->signal->group_stop_count) && | |
2049 | - !--tsk->signal->group_stop_count) { | |
2050 | - tsk->signal->flags = SIGNAL_STOP_STOPPED; | |
2051 | - group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED); | |
2052 | - } | |
2256 | + if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) && | |
2257 | + task_participate_group_stop(tsk)) | |
2258 | + group_stop = CLD_STOPPED; | |
2053 | 2259 | out: |
2054 | 2260 | spin_unlock_irq(&tsk->sighand->siglock); |
2055 | 2261 | |
2262 | + /* | |
2263 | + * If group stop has completed, deliver the notification. This | |
2264 | + * should always go to the real parent of the group leader. | |
2265 | + */ | |
2056 | 2266 | if (unlikely(group_stop)) { |
2057 | 2267 | read_lock(&tasklist_lock); |
2058 | - do_notify_parent_cldstop(tsk, group_stop); | |
2268 | + do_notify_parent_cldstop(tsk, false, group_stop); | |
2059 | 2269 | read_unlock(&tasklist_lock); |
2060 | 2270 | } |
2061 | 2271 | } |
2062 | 2272 | |
2063 | 2273 | |
... | ... | @@ -2089,12 +2299,34 @@ |
2089 | 2299 | return -EINTR; |
2090 | 2300 | } |
2091 | 2301 | |
2092 | -/* | |
2093 | - * We don't need to get the kernel lock - this is all local to this | |
2094 | - * particular thread.. (and that's good, because this is _heavily_ | |
2095 | - * used by various programs) | |
2302 | +static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) | |
2303 | +{ | |
2304 | + if (signal_pending(tsk) && !thread_group_empty(tsk)) { | |
2305 | + sigset_t newblocked; | |
2306 | + /* A set of now blocked but previously unblocked signals. */ | |
2307 | + sigandnsets(&newblocked, newset, &current->blocked); | 
2308 | + retarget_shared_pending(tsk, &newblocked); | |
2309 | + } | |
2310 | + tsk->blocked = *newset; | |
2311 | + recalc_sigpending(); | |
2312 | +} | |
2313 | + | |
2314 | +/** | |
2315 | + * set_current_blocked - change current->blocked mask | |
2316 | + * @newset: new mask | |
2317 | + * | |
2318 | + * It is wrong to change ->blocked directly, this helper should be used | |
2319 | + * to ensure the process can't miss a shared signal we are going to block. | |
2096 | 2320 | */ |
2321 | +void set_current_blocked(const sigset_t *newset) | |
2322 | +{ | |
2323 | + struct task_struct *tsk = current; | |
2097 | 2324 | |
2325 | + spin_lock_irq(&tsk->sighand->siglock); | |
2326 | + __set_task_blocked(tsk, newset); | |
2327 | + spin_unlock_irq(&tsk->sighand->siglock); | |
2328 | +} | |
2329 | + | |
2098 | 2330 | /* |
2099 | 2331 | * This is also useful for kernel threads that want to temporarily |
2100 | 2332 | * (or permanently) block certain signals. |
... | ... | @@ -2105,30 +2337,29 @@ |
2105 | 2337 | */ |
2106 | 2338 | int sigprocmask(int how, sigset_t *set, sigset_t *oldset) |
2107 | 2339 | { |
2108 | - int error; | |
2340 | + struct task_struct *tsk = current; | |
2341 | + sigset_t newset; | |
2109 | 2342 | |
2110 | - spin_lock_irq(&current->sighand->siglock); | 
2343 | + /* Lockless, only current can change ->blocked, never from irq */ | |
2111 | 2344 | if (oldset) |
2112 | - *oldset = current->blocked; | |
2345 | + *oldset = tsk->blocked; | |
2113 | 2346 | |
2114 | - error = 0; | |
2115 | 2347 | switch (how) { |
2116 | 2348 | case SIG_BLOCK: |
2117 | - sigorsets(&current->blocked, &current->blocked, set); | 
2349 | + sigorsets(&newset, &tsk->blocked, set); | |
2118 | 2350 | break; |
2119 | 2351 | case SIG_UNBLOCK: |
2120 | - signandsets(&current->blocked, &current->blocked, set); | 
2352 | + sigandnsets(&newset, &tsk->blocked, set); | |
2121 | 2353 | break; |
2122 | 2354 | case SIG_SETMASK: |
2123 | - current->blocked = *set; | |
2355 | + newset = *set; | |
2124 | 2356 | break; |
2125 | 2357 | default: |
2126 | - error = -EINVAL; | |
2358 | + return -EINVAL; | |
2127 | 2359 | } |
2128 | - recalc_sigpending(); | |
2129 | - spin_unlock_irq(&current->sighand->siglock); | 
2130 | 2360 | |
2131 | - return error; | |
2361 | + set_current_blocked(&newset); | |
2362 | + return 0; | |
2132 | 2363 | } |
2133 | 2364 | |
2134 | 2365 | /** |
... | ... | @@ -2138,40 +2369,34 @@ |
2138 | 2369 | * @oset: previous value of signal mask if non-null |
2139 | 2370 | * @sigsetsize: size of sigset_t type |
2140 | 2371 | */ |
2141 | -SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set, | |
2372 | +SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset, | |
2142 | 2373 | sigset_t __user *, oset, size_t, sigsetsize) |
2143 | 2374 | { |
2144 | - int error = -EINVAL; | |
2145 | 2375 | sigset_t old_set, new_set; |
2376 | + int error; | |
2146 | 2377 | |
2147 | 2378 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2148 | 2379 | if (sigsetsize != sizeof(sigset_t)) |
2149 | - goto out; | |
2380 | + return -EINVAL; | |
2150 | 2381 | |
2151 | - if (set) { | |
2152 | - error = -EFAULT; | |
2153 | - if (copy_from_user(&new_set, set, sizeof(*set))) | |
2154 | - goto out; | |
2382 | + old_set = current->blocked; | |
2383 | + | |
2384 | + if (nset) { | |
2385 | + if (copy_from_user(&new_set, nset, sizeof(sigset_t))) | |
2386 | + return -EFAULT; | |
2155 | 2387 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
2156 | 2388 | |
2157 | - error = sigprocmask(how, &new_set, &old_set); | |
2389 | + error = sigprocmask(how, &new_set, NULL); | |
2158 | 2390 | if (error) |
2159 | - goto out; | |
2160 | - if (oset) | |
2161 | - goto set_old; | |
2162 | - } else if (oset) { | |
2163 | - spin_lock_irq(&current->sighand->siglock); | 
2164 | - old_set = current->blocked; | 
2165 | - spin_unlock_irq(&current->sighand->siglock); | 
2391 | + return error; | |
2392 | + } | |
2166 | 2393 | |
2167 | - set_old: | |
2168 | - error = -EFAULT; | |
2169 | - if (copy_to_user(oset, &old_set, sizeof(*oset))) | |
2170 | - goto out; | |
2394 | + if (oset) { | |
2395 | + if (copy_to_user(oset, &old_set, sizeof(sigset_t))) | |
2396 | + return -EFAULT; | |
2171 | 2397 | } |
2172 | - error = 0; | |
2173 | -out: | |
2174 | - return error; | |
2398 | + | |
2399 | + return 0; | |
2175 | 2400 | } |
2176 | 2401 | |
2177 | 2402 | long do_sigpending(void __user *set, unsigned long sigsetsize) |
... | ... | @@ -2284,6 +2509,66 @@ |
2284 | 2509 | #endif |
2285 | 2510 | |
2286 | 2511 | /** |
2512 | + * do_sigtimedwait - wait for queued signals specified in @which | |
2513 | + * @which: queued signals to wait for | |
2514 | + * @info: if non-null, the signal's siginfo is returned here | |
2515 | + * @ts: upper bound on process time suspension | |
2516 | + */ | |
2517 | +int do_sigtimedwait(const sigset_t *which, siginfo_t *info, | |
2518 | + const struct timespec *ts) | |
2519 | +{ | |
2520 | + struct task_struct *tsk = current; | |
2521 | + long timeout = MAX_SCHEDULE_TIMEOUT; | |
2522 | + sigset_t mask = *which; | |
2523 | + int sig; | |
2524 | + | |
2525 | + if (ts) { | |
2526 | + if (!timespec_valid(ts)) | |
2527 | + return -EINVAL; | |
2528 | + timeout = timespec_to_jiffies(ts); | |
2529 | + /* | |
2530 | + * We can be close to the next tick; add another one 
2531 | + * to ensure we will wait at least the time asked for. | |
2532 | + */ | |
2533 | + if (ts->tv_sec || ts->tv_nsec) | |
2534 | + timeout++; | |
2535 | + } | |
2536 | + | |
2537 | + /* | |
2538 | + * Invert the set of allowed signals to get those we want to block. | |
2539 | + */ | |
2540 | + sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); | |
2541 | + signotset(&mask); | |
2542 | + | |
2543 | + spin_lock_irq(&tsk->sighand->siglock); | |
2544 | + sig = dequeue_signal(tsk, &mask, info); | |
2545 | + if (!sig && timeout) { | |
2546 | + /* | |
2547 | + * None ready, temporarily unblock those we're interested in 
2548 | + * while we are sleeping so that we'll be awakened when 
2549 | + * they arrive. Unblocking is always fine; we can avoid 
2550 | + * set_current_blocked(). | |
2551 | + */ | |
2552 | + tsk->real_blocked = tsk->blocked; | |
2553 | + sigandsets(&tsk->blocked, &tsk->blocked, &mask); | |
2554 | + recalc_sigpending(); | |
2555 | + spin_unlock_irq(&tsk->sighand->siglock); | |
2556 | + | |
2557 | + timeout = schedule_timeout_interruptible(timeout); | |
2558 | + | |
2559 | + spin_lock_irq(&tsk->sighand->siglock); | |
2560 | + __set_task_blocked(tsk, &tsk->real_blocked); | |
2561 | + siginitset(&tsk->real_blocked, 0); | |
2562 | + sig = dequeue_signal(tsk, &mask, info); | |
2563 | + } | |
2564 | + spin_unlock_irq(&tsk->sighand->siglock); | |
2565 | + | |
2566 | + if (sig) | |
2567 | + return sig; | |
2568 | + return timeout ? -EINTR : -EAGAIN; | |
2569 | +} | |
2570 | + | |
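
The return convention of do_sigtimedwait() above maps straight onto what userspace sees from sigtimedwait(2): a positive signal number on success, EAGAIN once the timeout fully expires, and EINTR when an unblocked signal outside @which wakes the sleep early. A small illustrative caller (standard POSIX, assumed for demonstration only):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	/* The wait set must be blocked first, or a delivered SIGUSR1
	 * could run its default disposition instead of being queued. */
	sigprocmask(SIG_BLOCK, &set, NULL);

	int sig = sigtimedwait(&set, &info, &ts);
	if (sig < 0) {
		if (errno == EAGAIN)
			puts("timed out (kernel returned -EAGAIN)");
		else if (errno == EINTR)
			puts("woken by an unblocked signal (-EINTR)");
	} else {
		printf("got signal %d from pid %d\n", sig, (int)info.si_pid);
	}
	return 0;
}
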
2571 | +/** | |
2287 | 2572 | * sys_rt_sigtimedwait - synchronously wait for queued signals specified |
2288 | 2573 | * in @uthese |
2289 | 2574 | * @uthese: queued signals to wait for |
... | ... | @@ -2295,11 +2580,10 @@ |
2295 | 2580 | siginfo_t __user *, uinfo, const struct timespec __user *, uts, |
2296 | 2581 | size_t, sigsetsize) |
2297 | 2582 | { |
2298 | - int ret, sig; | |
2299 | 2583 | sigset_t these; |
2300 | 2584 | struct timespec ts; |
2301 | 2585 | siginfo_t info; |
2302 | - long timeout = 0; | |
2586 | + int ret; | |
2303 | 2587 | |
2304 | 2588 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2305 | 2589 | if (sigsetsize != sizeof(sigset_t)) |
2306 | 2590 | return -EINVAL; 
... | ... | @@ -2308,63 +2592,18 @@ |
2308 | 2592 | if (copy_from_user(&these, uthese, sizeof(these))) |
2309 | 2593 | return -EFAULT; |
2310 | 2594 | |
2311 | - /* | |
2312 | - * Invert the set of allowed signals to get those we | |
2313 | - * want to block. | |
2314 | - */ | |
2315 | - sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP)); | |
2316 | - signotset(&these); | |
2317 | - | |
2318 | 2595 | if (uts) { |
2319 | 2596 | if (copy_from_user(&ts, uts, sizeof(ts))) |
2320 | 2597 | return -EFAULT; |
2321 | - if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0 | |
2322 | - || ts.tv_sec < 0) | |
2323 | - return -EINVAL; | |
2324 | 2598 | } |
2325 | 2599 | |
2326 | - spin_lock_irq(¤t->sighand->siglock); | |
2327 | - sig = dequeue_signal(current, &these, &info); | |
2328 | - if (!sig) { | |
2329 | - timeout = MAX_SCHEDULE_TIMEOUT; | |
2330 | - if (uts) | |
2331 | - timeout = (timespec_to_jiffies(&ts) | |
2332 | - + (ts.tv_sec || ts.tv_nsec)); | |
2600 | + ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); | |
2333 | 2601 | |
2334 | - if (timeout) { | |
2335 | - /* | |
2336 | - * None ready -- temporarily unblock those we're | |
2337 | - * interested while we are sleeping in so that we'll | |
2338 | - * be awakened when they arrive. | |
2339 | - */ | |
2340 | - current->real_blocked = current->blocked; | |
2341 | - sigandsets(¤t->blocked, ¤t->blocked, &these); | |
2342 | - recalc_sigpending(); | |
2343 | - spin_unlock_irq(¤t->sighand->siglock); | |
2344 | - | |
2345 | - timeout = schedule_timeout_interruptible(timeout); | |
2346 | - | |
2347 | - spin_lock_irq(¤t->sighand->siglock); | |
2348 | - sig = dequeue_signal(current, &these, &info); | |
2349 | - current->blocked = current->real_blocked; | |
2350 | - siginitset(¤t->real_blocked, 0); | |
2351 | - recalc_sigpending(); | |
2352 | - } | |
2602 | + if (ret > 0 && uinfo) { | |
2603 | + if (copy_siginfo_to_user(uinfo, &info)) | |
2604 | + ret = -EFAULT; | |
2353 | 2605 | } |
2354 | - spin_unlock_irq(¤t->sighand->siglock); | |
2355 | 2606 | |
2356 | - if (sig) { | |
2357 | - ret = sig; | |
2358 | - if (uinfo) { | |
2359 | - if (copy_siginfo_to_user(uinfo, &info)) | |
2360 | - ret = -EFAULT; | |
2361 | - } | |
2362 | - } else { | |
2363 | - ret = -EAGAIN; | |
2364 | - if (timeout) | |
2365 | - ret = -EINTR; | |
2366 | - } | |
2367 | - | |
2368 | 2607 | return ret; |
2369 | 2608 | } |
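
With the core factored out, sys_rt_sigtimedwait() is reduced to argument marshalling, and the open-coded tv_nsec/tv_sec range test deleted above is subsumed by the timespec_valid() call inside do_sigtimedwait(). The two checks are equivalent; a userspace-compilable sketch of that validity test (the name here is illustrative, not the kernel's):

#include <stdbool.h>
#include <time.h>

/* Equivalent of the kernel's timespec_valid(): tv_sec must be
 * non-negative and tv_nsec must fall in [0, 1e9). */
static bool timespec_valid_sketch(const struct timespec *ts)
{
	return ts->tv_sec >= 0 &&
	       ts->tv_nsec >= 0 && ts->tv_nsec < 1000000000L;
}
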
... | ... | @@ -2650,60 +2889,51 @@ |
2650 | 2889 | /** |
2651 | 2890 | * sys_sigprocmask - examine and change blocked signals |
2652 | 2891 | * @how: whether to add, remove, or set signals |
2653 | - * @set: signals to add or remove (if non-null) | |
2892 | + * @nset: signals to add or remove (if non-null) | |
2654 | 2893 | * @oset: previous value of signal mask if non-null |
2655 | 2894 | * |
2656 | 2895 | * Some platforms have their own version with special arguments; |
2657 | 2896 | * others support only sys_rt_sigprocmask. |
2658 | 2897 | */ |
2659 | 2898 | |
2660 | -SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set, | |
2899 | +SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset, | |
2661 | 2900 | old_sigset_t __user *, oset) |
2662 | 2901 | { |
2663 | - int error; | |
2664 | 2902 | old_sigset_t old_set, new_set; |
2903 | + sigset_t new_blocked; | |
2665 | 2904 | |
2666 | - if (set) { | |
2667 | - error = -EFAULT; | |
2668 | - if (copy_from_user(&new_set, set, sizeof(*set))) | |
2669 | - goto out; | |
2905 | + old_set = current->blocked.sig[0]; | |
2906 | + | |
2907 | + if (nset) { | |
2908 | + if (copy_from_user(&new_set, nset, sizeof(*nset))) | |
2909 | + return -EFAULT; | |
2670 | 2910 | new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)); |
2671 | 2911 | |
2672 | - spin_lock_irq(¤t->sighand->siglock); | |
2673 | - old_set = current->blocked.sig[0]; | |
2912 | + new_blocked = current->blocked; | |
2674 | 2913 | |
2675 | - error = 0; | |
2676 | 2914 | switch (how) { |
2677 | - default: | |
2678 | - error = -EINVAL; | |
2679 | - break; | |
2680 | 2915 | case SIG_BLOCK: |
2681 | - sigaddsetmask(¤t->blocked, new_set); | |
2916 | + sigaddsetmask(&new_blocked, new_set); | |
2682 | 2917 | break; |
2683 | 2918 | case SIG_UNBLOCK: |
2684 | - sigdelsetmask(¤t->blocked, new_set); | |
2919 | + sigdelsetmask(&new_blocked, new_set); | |
2685 | 2920 | break; |
2686 | 2921 | case SIG_SETMASK: |
2687 | - current->blocked.sig[0] = new_set; | |
2922 | + new_blocked.sig[0] = new_set; | |
2688 | 2923 | break; |
2924 | + default: | |
2925 | + return -EINVAL; | |
2689 | 2926 | } |
2690 | 2927 | |
2691 | - recalc_sigpending(); | |
2692 | - spin_unlock_irq(¤t->sighand->siglock); | |
2693 | - if (error) | |
2694 | - goto out; | |
2695 | - if (oset) | |
2696 | - goto set_old; | |
2697 | - } else if (oset) { | |
2698 | - old_set = current->blocked.sig[0]; | |
2699 | - set_old: | |
2700 | - error = -EFAULT; | |
2928 | + set_current_blocked(&new_blocked); | |
2929 | + } | |
2930 | + | |
2931 | + if (oset) { | |
2701 | 2932 | if (copy_to_user(oset, &old_set, sizeof(*oset))) |
2702 | - goto out; | |
2933 | + return -EFAULT; | |
2703 | 2934 | } |
2704 | - error = 0; | |
2705 | -out: | |
2706 | - return error; | |
2935 | + | |
2936 | + return 0; | |
2707 | 2937 | } |
2708 | 2938 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ |
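
The legacy sys_sigprocmask() keeps its word-sized old_sigset_t ABI: the rework copies the full current mask, edits word 0, and pushes the result through set_current_blocked(), mirroring the rt variant above. How sigmask() and the SIGKILL/SIGSTOP stripping behave on that single word (a standalone illustration; SIGMASK is a local stand-in for the kernel macro):

#include <signal.h>
#include <stdio.h>

/* Same definition as the kernel's sigmask(): bit (sig - 1). */
#define SIGMASK(sig) (1UL << ((sig) - 1))

int main(void)
{
	unsigned long new_set = SIGMASK(SIGKILL) | SIGMASK(SIGINT);

	/* The kernel strips SIGKILL/SIGSTOP before applying the mask,
	 * exactly as the new_set &= ~(...) line above does. */
	new_set &= ~(SIGMASK(SIGKILL) | SIGMASK(SIGSTOP));
	printf("SIGINT still set: %d\n", !!(new_set & SIGMASK(SIGINT)));
	printf("SIGKILL stripped: %d\n", !(new_set & SIGMASK(SIGKILL)));
	return 0;
}
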
2709 | 2939 |