Commit e46bc9b6fd65bc9f406a4211fbf95683cc9c2937
Exists in master and in 20 other branches
Merge branch 'ptrace' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into ptrace
Showing 6 changed files (side-by-side diff)
fs/exec.c
include/linux/sched.h
| ... | ... | @@ -653,9 +653,8 @@ |
| 653 | 653 | * Bits in flags field of signal_struct. |
| 654 | 654 | */ |
| 655 | 655 | #define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ |
| 656 | -#define SIGNAL_STOP_DEQUEUED 0x00000002 /* stop signal dequeued */ | |
| 657 | -#define SIGNAL_STOP_CONTINUED 0x00000004 /* SIGCONT since WCONTINUED reap */ | |
| 658 | -#define SIGNAL_GROUP_EXIT 0x00000008 /* group exit in progress */ | |
| 656 | +#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ | |
| 657 | +#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ | |
| 659 | 658 | /* |
| 660 | 659 | * Pending notifications to parent. |
| 661 | 660 | */ |
| ... | ... | @@ -1261,6 +1260,7 @@ |
| 1261 | 1260 | int exit_state; |
| 1262 | 1261 | int exit_code, exit_signal; |
| 1263 | 1262 | int pdeath_signal; /* The signal sent when the parent dies */ |
| 1263 | + unsigned int group_stop; /* GROUP_STOP_*, siglock protected */ | |
| 1264 | 1264 | /* ??? */ |
| 1265 | 1265 | unsigned int personality; |
| 1266 | 1266 | unsigned did_exec:1; |
| ... | ... | @@ -1776,6 +1776,17 @@ |
| 1776 | 1776 | /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ |
| 1777 | 1777 | #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) |
| 1778 | 1778 | #define used_math() tsk_used_math(current) |
| 1779 | + | |
| 1780 | +/* | |
| 1781 | + * task->group_stop flags | |
| 1782 | + */ | |
| 1783 | +#define GROUP_STOP_SIGMASK 0xffff /* signr of the last group stop */ | |
| 1784 | +#define GROUP_STOP_PENDING (1 << 16) /* task should stop for group stop */ | |
| 1785 | +#define GROUP_STOP_CONSUME (1 << 17) /* consume group stop count */ | |
| 1786 | +#define GROUP_STOP_TRAPPING (1 << 18) /* switching from STOPPED to TRACED */ | |
| 1787 | +#define GROUP_STOP_DEQUEUED (1 << 19) /* stop signal dequeued */ | |
| 1788 | + | |
| 1789 | +extern void task_clear_group_stop_pending(struct task_struct *task); | |
| 1779 | 1790 | |
| 1780 | 1791 | #ifdef CONFIG_PREEMPT_RCU |
| 1781 | 1792 |
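
The new GROUP_STOP_SIGMASK layout packs the number of the signal that initiated the stop into the low 16 bits of task->group_stop, with the state bits stacked above it. A minimal userspace sketch of the encoding - flag values copied from the hunk above; the helper and main() are illustrative, not kernel code:

    #include <assert.h>
    #include <signal.h>

    #define GROUP_STOP_SIGMASK  0xffff      /* signr of the last group stop */
    #define GROUP_STOP_PENDING  (1 << 16)   /* task should stop for group stop */
    #define GROUP_STOP_CONSUME  (1 << 17)   /* consume group stop count */

    /* replace the recorded signr and mark the task a pending participant */
    static void record_stop_signal(unsigned int *group_stop, int signr)
    {
            *group_stop &= ~GROUP_STOP_SIGMASK;
            *group_stop |= signr | GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
    }

    int main(void)
    {
            unsigned int group_stop = 0;

            record_stop_signal(&group_stop, SIGTSTP);
            assert((group_stop & GROUP_STOP_SIGMASK) == SIGTSTP);
            assert(group_stop & GROUP_STOP_PENDING);
            return 0;
    }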
include/linux/tracehook.h
| ... | ... | @@ -469,33 +469,6 @@ |
| 469 | 469 | } |
| 470 | 470 | |
| 471 | 471 | /** |
| 472 | - * tracehook_notify_jctl - report about job control stop/continue | |
| 473 | - * @notify: zero, %CLD_STOPPED or %CLD_CONTINUED | |
| 474 | - * @why: %CLD_STOPPED or %CLD_CONTINUED | |
| 475 | - * | |
| 476 | - * This is called when we might call do_notify_parent_cldstop(). | |
| 477 | - * | |
| 478 | - * @notify is zero if we would not ordinarily send a %SIGCHLD, | |
| 479 | - * or is the %CLD_STOPPED or %CLD_CONTINUED .si_code for %SIGCHLD. | |
| 480 | - * | |
| 481 | - * @why is %CLD_STOPPED when about to stop for job control; | |
| 482 | - * we are already in %TASK_STOPPED state, about to call schedule(). | |
| 483 | - * It might also be that we have just exited (check %PF_EXITING), | |
| 484 | - * but need to report that a group-wide stop is complete. | |
| 485 | - * | |
| 486 | - * @why is %CLD_CONTINUED when waking up after job control stop and | |
| 487 | - * ready to make a delayed @notify report. | |
| 488 | - * | |
| 489 | - * Return the %CLD_* value for %SIGCHLD, or zero to generate no signal. | |
| 490 | - * | |
| 491 | - * Called with the siglock held. | |
| 492 | - */ | |
| 493 | -static inline int tracehook_notify_jctl(int notify, int why) | |
| 494 | -{ | |
| 495 | - return notify ?: (current->ptrace & PT_PTRACED) ? why : 0; | |
| 496 | -} | |
| 497 | - | |
| 498 | -/** | |
| 499 | 472 | * tracehook_finish_jctl - report about return from job control stop |
| 500 | 473 | * |
| 501 | 474 | * This is called by do_signal_stop() after wakeup. |
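
The deleted tracehook_notify_jctl() folded two decisions into one GNU "?:" expression. A spelled-out equivalent for reference (illustrative only; ptraced stands in for current->ptrace & PT_PTRACED):

    static inline int notify_jctl_equiv(int notify, int why, int ptraced)
    {
            if (notify)
                    return notify;  /* an ordinary SIGCHLD notification is due */
            if (ptraced)
                    return why;     /* a ptracer hears about every stop/continue */
            return 0;               /* otherwise generate no signal */
    }

With the hook gone, the ptracer and the real parent are notified explicitly and separately by the kernel/signal.c changes below.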
kernel/exit.c
| ... | ... | @@ -1538,33 +1538,83 @@ |
| 1538 | 1538 | return 0; |
| 1539 | 1539 | } |
| 1540 | 1540 | |
| 1541 | - if (likely(!ptrace) && unlikely(task_ptrace(p))) { | |
| 1541 | + /* dead body doesn't have much to contribute */ | |
| 1542 | + if (p->exit_state == EXIT_DEAD) | |
| 1543 | + return 0; | |
| 1544 | + | |
| 1545 | + /* slay zombie? */ | |
| 1546 | + if (p->exit_state == EXIT_ZOMBIE) { | |
| 1542 | 1547 | /* |
| 1543 | - * This child is hidden by ptrace. | |
| 1544 | - * We aren't allowed to see it now, but eventually we will. | |
| 1548 | + * A zombie ptracee is only visible to its ptracer. | |
| 1549 | + * Notification and reaping will be cascaded to the real | |
| 1550 | + * parent when the ptracer detaches. | |
| 1545 | 1551 | */ |
| 1552 | + if (likely(!ptrace) && unlikely(task_ptrace(p))) { | |
| 1553 | + /* it will become visible, clear notask_error */ | |
| 1554 | + wo->notask_error = 0; | |
| 1555 | + return 0; | |
| 1556 | + } | |
| 1557 | + | |
| 1558 | + /* we don't reap group leaders with subthreads */ | |
| 1559 | + if (!delay_group_leader(p)) | |
| 1560 | + return wait_task_zombie(wo, p); | |
| 1561 | + | |
| 1562 | + /* | |
| 1563 | + * Allow access to stopped/continued state via zombie by | |
| 1564 | + * falling through. Clearing of notask_error is complex. | |
| 1565 | + * | |
| 1566 | + * When !@ptrace: | |
| 1567 | + * | |
| 1568 | + * If WEXITED is set, notask_error should naturally be | |
| 1569 | + * cleared. If not, a subset of WSTOPPED|WCONTINUED is set, | |
| 1570 | + * so, if there are live subthreads, there are events to | |
| 1571 | + * wait for. If all subthreads are dead, it's still safe | |
| 1572 | + * to clear - this function will be called again in a finite | |
| 1573 | + * amount of time once all the subthreads are released and | |
| 1574 | + * will then return without clearing. | |
| 1575 | + * | |
| 1576 | + * When @ptrace: | |
| 1577 | + * | |
| 1578 | + * Stopped state is per-task and thus can't change once the | |
| 1579 | + * target task dies. Only continued and exited can happen. | |
| 1580 | + * Clear notask_error if WCONTINUED | WEXITED. | |
| 1581 | + */ | |
| 1582 | + if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED))) | |
| 1583 | + wo->notask_error = 0; | |
| 1584 | + } else { | |
| 1585 | + /* | |
| 1586 | + * If @p is ptraced by a task in its real parent's group, | |
| 1587 | + * hide group stop/continued state when looking at @p as | |
| 1588 | + * the real parent; otherwise, a single stop can be | |
| 1589 | + * reported twice as group and ptrace stops. | |
| 1590 | + * | |
| 1591 | + * If a ptracer wants to distinguish the two events for its | |
| 1592 | + * own children, it should create a separate process which | |
| 1593 | + * takes the role of real parent. | |
| 1594 | + */ | |
| 1595 | + if (likely(!ptrace) && task_ptrace(p) && | |
| 1596 | + same_thread_group(p->parent, p->real_parent)) | |
| 1597 | + return 0; | |
| 1598 | + | |
| 1599 | + /* | |
| 1600 | + * @p is alive and it's gonna stop, continue or exit, so | |
| 1601 | + * there always is something to wait for. | |
| 1602 | + */ | |
| 1546 | 1603 | wo->notask_error = 0; |
| 1547 | - return 0; | |
| 1548 | 1604 | } |
| 1549 | 1605 | |
| 1550 | - if (p->exit_state == EXIT_DEAD) | |
| 1551 | - return 0; | |
| 1552 | - | |
| 1553 | 1606 | /* |
| 1554 | - * We don't reap group leaders with subthreads. | |
| 1607 | + * Wait for stopped. Depending on @ptrace, different stopped state | |
| 1608 | + * is used and the two don't interact with each other. | |
| 1555 | 1609 | */ |
| 1556 | - if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p)) | |
| 1557 | - return wait_task_zombie(wo, p); | |
| 1558 | - | |
| 1559 | - /* | |
| 1560 | - * It's stopped or running now, so it might | |
| 1561 | - * later continue, exit, or stop again. | |
| 1562 | - */ | |
| 1563 | - wo->notask_error = 0; | |
| 1564 | - | |
| 1565 | 1610 | if (task_stopped_code(p, ptrace)) |
| 1566 | 1611 | return wait_task_stopped(wo, ptrace, p); |
| 1567 | 1612 | |
| 1613 | + /* | |
| 1614 | + * Wait for continued. There's only one continued state and the | |
| 1615 | + * ptracer can consume it which can confuse the real parent. Don't | |
| 1616 | + * use WCONTINUED from ptracer. You don't need or want it. | |
| 1617 | + */ | |
| 1568 | 1618 | return wait_task_continued(wo, p); |
| 1569 | 1619 | } |
| 1570 | 1620 |
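
A condensed, compileable restatement of the new wait_consider_task() decision ladder (illustrative only - the real function takes struct wait_opts, hides group stop from a same-thread-group real parent, and does the notask_error bookkeeping described in the comments above):

    #include <stdbool.h>

    enum verdict { IGNORE, REAP_ZOMBIE, WAIT_STOPPED, WAIT_CONTINUED };

    struct tsk {
            bool dead, zombie, ptraced, delay_leader, stopped;
    };

    static enum verdict consider(const struct tsk *p, bool as_ptracer)
    {
            if (p->dead)
                    return IGNORE;                  /* nothing to contribute */

            if (p->zombie) {
                    if (!as_ptracer && p->ptraced)
                            return IGNORE;          /* visible to ptracer only */
                    if (!p->delay_leader)
                            return REAP_ZOMBIE;     /* wait_task_zombie() */
                    /* group leader with live subthreads: fall through */
            }

            if (p->stopped)
                    return WAIT_STOPPED;            /* wait_task_stopped() */

            return WAIT_CONTINUED;                  /* wait_task_continued() */
    }

    int main(void)
    {
            struct tsk zombie = { .zombie = true };
            return consider(&zombie, false) == REAP_ZOMBIE ? 0 : 1;
    }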
kernel/ptrace.c
| ... | ... | @@ -37,35 +37,33 @@ |
| 37 | 37 | child->parent = new_parent; |
| 38 | 38 | } |
| 39 | 39 | |
| 40 | -/* | |
| 41 | - * Turn a tracing stop into a normal stop now, since with no tracer there | |
| 42 | - * would be no way to wake it up with SIGCONT or SIGKILL. If there was a | |
| 43 | - * signal sent that would resume the child, but didn't because it was in | |
| 44 | - * TASK_TRACED, resume it now. | |
| 45 | - * Requires that irqs be disabled. | |
| 46 | - */ | |
| 47 | -static void ptrace_untrace(struct task_struct *child) | |
| 48 | -{ | |
| 49 | - spin_lock(&child->sighand->siglock); | |
| 50 | - if (task_is_traced(child)) { | |
| 51 | - /* | |
| 52 | - * If the group stop is completed or in progress, | |
| 53 | - * this thread was already counted as stopped. | |
| 54 | - */ | |
| 55 | - if (child->signal->flags & SIGNAL_STOP_STOPPED || | |
| 56 | - child->signal->group_stop_count) | |
| 57 | - __set_task_state(child, TASK_STOPPED); | |
| 58 | - else | |
| 59 | - signal_wake_up(child, 1); | |
| 60 | - } | |
| 61 | - spin_unlock(&child->sighand->siglock); | |
| 62 | -} | |
| 63 | - | |
| 64 | -/* | |
| 65 | - * unptrace a task: move it back to its original parent and | |
| 66 | - * remove it from the ptrace list. | |
| 40 | +/** | |
| 41 | + * __ptrace_unlink - unlink ptracee and restore its execution state | |
| 42 | + * @child: ptracee to be unlinked | |
| 67 | 43 | * |
| 68 | - * Must be called with the tasklist lock write-held. | |
| 44 | + * Remove @child from the ptrace list, move it back to the original parent, | |
| 45 | + * and restore the execution state so that it conforms to the group stop | |
| 46 | + * state. | |
| 47 | + * | |
| 48 | + * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer | |
| 49 | + * exiting. For PTRACE_DETACH, unless the ptracee has been killed between | |
| 50 | + * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED. | |
| 51 | + * If the ptracer is exiting, the ptracee can be in any state. | |
| 52 | + * | |
| 53 | + * After detach, the ptracee should be in a state which conforms to the | |
| 54 | + * group stop. If the group is stopped or in the process of stopping, the | |
| 55 | + * ptracee should be put into TASK_STOPPED; otherwise, it should be woken | |
| 56 | + * up from TASK_TRACED. | |
| 57 | + * | |
| 58 | + * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED, | |
| 59 | + * it goes through TRACED -> RUNNING -> STOPPED transition which is similar | |
| 60 | + * to but in the opposite direction of what happens while attaching to a | |
| 61 | + * stopped task. However, in this direction, the intermediate RUNNING | |
| 62 | + * state is not hidden even from the current ptracer and if it immediately | |
| 63 | + * re-attaches and performs a WNOHANG wait(2), it may fail. | |
| 64 | + * | |
| 65 | + * CONTEXT: | |
| 66 | + * write_lock_irq(tasklist_lock) | |
| 69 | 67 | */ |
| 70 | 68 | void __ptrace_unlink(struct task_struct *child) |
| 71 | 69 | { |
| ... | ... | @@ -75,8 +73,27 @@ |
| 75 | 73 | child->parent = child->real_parent; |
| 76 | 74 | list_del_init(&child->ptrace_entry); |
| 77 | 75 | |
| 78 | - if (task_is_traced(child)) | |
| 79 | - ptrace_untrace(child); | |
| 76 | + spin_lock(&child->sighand->siglock); | |
| 77 | + | |
| 78 | + /* | |
| 79 | + * Reinstate GROUP_STOP_PENDING if group stop is in effect and | |
| 80 | + * @child isn't dead. | |
| 81 | + */ | |
| 82 | + if (!(child->flags & PF_EXITING) && | |
| 83 | + (child->signal->flags & SIGNAL_STOP_STOPPED || | |
| 84 | + child->signal->group_stop_count)) | |
| 85 | + child->group_stop |= GROUP_STOP_PENDING; | |
| 86 | + | |
| 87 | + /* | |
| 88 | + * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick | |
| 89 | + * @child in the butt. Note that @resume should be used iff @child | |
| 90 | + * is in TASK_TRACED; otherwise, we might unduly disrupt | |
| 91 | + * TASK_KILLABLE sleeps. | |
| 92 | + */ | |
| 93 | + if (child->group_stop & GROUP_STOP_PENDING || task_is_traced(child)) | |
| 94 | + signal_wake_up(child, task_is_traced(child)); | |
| 95 | + | |
| 96 | + spin_unlock(&child->sighand->siglock); | |
| 80 | 97 | } |
| 81 | 98 | |
| 82 | 99 | /* |
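
The transient RUNNING state that the __ptrace_unlink() comment warns about is observable from userspace. A rough demo sketch (error handling omitted, structure is mine, and whether the WNOHANG wait actually comes back empty depends on timing):

    #include <stdio.h>
    #include <signal.h>
    #include <unistd.h>
    #include <sys/ptrace.h>
    #include <sys/wait.h>

    int main(void)
    {
            pid_t pid = fork();

            if (pid == 0) {                         /* child: group-stop itself */
                    raise(SIGSTOP);
                    _exit(0);
            }

            waitpid(pid, NULL, WUNTRACED);          /* child is TASK_STOPPED */

            ptrace(PTRACE_ATTACH, pid, NULL, NULL);
            waitpid(pid, NULL, 0);                  /* STOPPED -> TRACED done */

            ptrace(PTRACE_DETACH, pid, NULL, NULL);

            /*
             * The tracee is now transiting TRACED -> RUNNING -> STOPPED.
             * An immediate re-attach plus WNOHANG wait can observe the
             * intermediate RUNNING state and report no event yet.
             */
            ptrace(PTRACE_ATTACH, pid, NULL, NULL);
            if (waitpid(pid, NULL, WNOHANG) == 0)
                    printf("transient RUNNING observed\n");

            kill(pid, SIGKILL);
            return 0;
    }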
| ... | ... | @@ -95,16 +112,14 @@ |
| 95 | 112 | */ |
| 96 | 113 | read_lock(&tasklist_lock); |
| 97 | 114 | if ((child->ptrace & PT_PTRACED) && child->parent == current) { |
| 98 | - ret = 0; | |
| 99 | 115 | /* |
| 100 | 116 | * child->sighand can't be NULL, release_task() |
| 101 | 117 | * does ptrace_unlink() before __exit_signal(). |
| 102 | 118 | */ |
| 103 | 119 | spin_lock_irq(&child->sighand->siglock); |
| 104 | - if (task_is_stopped(child)) | |
| 105 | - child->state = TASK_TRACED; | |
| 106 | - else if (!task_is_traced(child) && !kill) | |
| 107 | - ret = -ESRCH; | |
| 120 | + WARN_ON_ONCE(task_is_stopped(child)); | |
| 121 | + if (task_is_traced(child) || kill) | |
| 122 | + ret = 0; | |
| 108 | 123 | spin_unlock_irq(&child->sighand->siglock); |
| 109 | 124 | } |
| 110 | 125 | read_unlock(&tasklist_lock); |
| ... | ... | @@ -168,6 +183,7 @@ |
| 168 | 183 | |
| 169 | 184 | static int ptrace_attach(struct task_struct *task) |
| 170 | 185 | { |
| 186 | + bool wait_trap = false; | |
| 171 | 187 | int retval; |
| 172 | 188 | |
| 173 | 189 | audit_ptrace(task); |
| ... | ... | @@ -207,12 +223,42 @@ |
| 207 | 223 | __ptrace_link(task, current); |
| 208 | 224 | send_sig_info(SIGSTOP, SEND_SIG_FORCED, task); |
| 209 | 225 | |
| 226 | + spin_lock(&task->sighand->siglock); | |
| 227 | + | |
| 228 | + /* | |
| 229 | + * If the task is already STOPPED, set GROUP_STOP_PENDING and | |
| 230 | + * TRAPPING, and kick it so that it transits to TRACED. TRAPPING | |
| 231 | + * will be cleared if the child completes the transition or any | |
| 232 | + * event which clears the group stop states happens. We'll wait | |
| 233 | + * for the transition to complete before returning from this | |
| 234 | + * function. | |
| 235 | + * | |
| 236 | + * This hides STOPPED -> RUNNING -> TRACED transition from the | |
| 237 | + * attaching thread but a different thread in the same group can | |
| 238 | + * still observe the transient RUNNING state. IOW, if another | |
| 239 | + * thread's WNOHANG wait(2) on the stopped tracee races against | |
| 240 | + * ATTACH, the wait(2) may fail due to the transient RUNNING. | |
| 241 | + * | |
| 242 | + * The following task_is_stopped() test is safe as both transitions | |
| 243 | + * in and out of STOPPED are protected by siglock. | |
| 244 | + */ | |
| 245 | + if (task_is_stopped(task)) { | |
| 246 | + task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING; | |
| 247 | + signal_wake_up(task, 1); | |
| 248 | + wait_trap = true; | |
| 249 | + } | |
| 250 | + | |
| 251 | + spin_unlock(&task->sighand->siglock); | |
| 252 | + | |
| 210 | 253 | retval = 0; |
| 211 | 254 | unlock_tasklist: |
| 212 | 255 | write_unlock_irq(&tasklist_lock); |
| 213 | 256 | unlock_creds: |
| 214 | 257 | mutex_unlock(&task->signal->cred_guard_mutex); |
| 215 | 258 | out: |
| 259 | + if (wait_trap) | |
| 260 | + wait_event(current->signal->wait_chldexit, | |
| 261 | + !(task->group_stop & GROUP_STOP_TRAPPING)); | |
| 216 | 262 | return retval; |
| 217 | 263 | } |
| 218 | 264 | |
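
The attach-side handshake just added - set GROUP_STOP_TRAPPING, kick the task, then wait_event() on ->wait_chldexit until the tracee clears the bit - is a conventional flag-plus-waitqueue pattern. A userspace pthreads model of it, illustrative only:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t wait_chldexit = PTHREAD_COND_INITIALIZER;
    static bool trapping;

    static void *tracee(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&siglock);
            /* ... enter TASK_TRACED ... */
            trapping = false;           /* task_clear_group_stop_trapping() */
            pthread_cond_signal(&wait_chldexit);
            pthread_mutex_unlock(&siglock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_mutex_lock(&siglock);
            trapping = true;            /* GROUP_STOP_TRAPPING */
            pthread_mutex_unlock(&siglock);

            pthread_create(&t, NULL, tracee, NULL);

            /* wait_event(current->signal->wait_chldexit, !TRAPPING) */
            pthread_mutex_lock(&siglock);
            while (trapping)
                    pthread_cond_wait(&wait_chldexit, &siglock);
            pthread_mutex_unlock(&siglock);

            pthread_join(t, NULL);
            puts("STOPPED -> TRACED transition complete");
            return 0;
    }

As in the kernel, the flag is cleared and the waiter woken under the same lock the waiter tests it under, so the wakeup cannot be lost.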
| ... | ... | @@ -315,8 +361,6 @@ |
| 315 | 361 | if (child->ptrace) { |
| 316 | 362 | child->exit_code = data; |
| 317 | 363 | dead = __ptrace_detach(current, child); |
| 318 | - if (!child->exit_state) | |
| 319 | - wake_up_state(child, TASK_TRACED | TASK_STOPPED); | |
| 320 | 364 | } |
| 321 | 365 | write_unlock_irq(&tasklist_lock); |
| 322 | 366 |
kernel/signal.c
| ... | ... | @@ -124,7 +124,7 @@ |
| 124 | 124 | |
| 125 | 125 | static int recalc_sigpending_tsk(struct task_struct *t) |
| 126 | 126 | { |
| 127 | - if (t->signal->group_stop_count > 0 || | |
| 127 | + if ((t->group_stop & GROUP_STOP_PENDING) || | |
| 128 | 128 | PENDING(&t->pending, &t->blocked) || |
| 129 | 129 | PENDING(&t->signal->shared_pending, &t->blocked)) { |
| 130 | 130 | set_tsk_thread_flag(t, TIF_SIGPENDING); |
| ... | ... | @@ -223,6 +223,83 @@ |
| 223 | 223 | current->comm, current->pid, sig); |
| 224 | 224 | } |
| 225 | 225 | |
| 226 | +/** | |
| 227 | + * task_clear_group_stop_trapping - clear group stop trapping bit | |
| 228 | + * @task: target task | |
| 229 | + * | |
| 230 | + * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us. Clear it | |
| 231 | + * and wake up the ptracer. Note that we don't need any further locking. | |
| 232 | + * @task->siglock guarantees that @task->parent points to the ptracer. | |
| 233 | + * | |
| 234 | + * CONTEXT: | |
| 235 | + * Must be called with @task->sighand->siglock held. | |
| 236 | + */ | |
| 237 | +static void task_clear_group_stop_trapping(struct task_struct *task) | |
| 238 | +{ | |
| 239 | + if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) { | |
| 240 | + task->group_stop &= ~GROUP_STOP_TRAPPING; | |
| 241 | + __wake_up_sync(&task->parent->signal->wait_chldexit, | |
| 242 | + TASK_UNINTERRUPTIBLE, 1); | |
| 243 | + } | |
| 244 | +} | |
| 245 | + | |
| 246 | +/** | |
| 247 | + * task_clear_group_stop_pending - clear pending group stop | |
| 248 | + * @task: target task | |
| 249 | + * | |
| 250 | + * Clear group stop states for @task. | |
| 251 | + * | |
| 252 | + * CONTEXT: | |
| 253 | + * Must be called with @task->sighand->siglock held. | |
| 254 | + */ | |
| 255 | +void task_clear_group_stop_pending(struct task_struct *task) | |
| 256 | +{ | |
| 257 | + task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME | | |
| 258 | + GROUP_STOP_DEQUEUED); | |
| 259 | +} | |
| 260 | + | |
| 261 | +/** | |
| 262 | + * task_participate_group_stop - participate in a group stop | |
| 263 | + * @task: task participating in a group stop | |
| 264 | + * | |
| 265 | + * @task has GROUP_STOP_PENDING set and is participating in a group stop. | |
| 266 | + * Group stop states are cleared and the group stop count is consumed if | |
| 267 | + * %GROUP_STOP_CONSUME was set. If the consumption completes the group | |
| 268 | + * stop, the appropriate %SIGNAL_* flags are set. | |
| 269 | + * | |
| 270 | + * CONTEXT: | |
| 271 | + * Must be called with @task->sighand->siglock held. | |
| 272 | + * | |
| 273 | + * RETURNS: | |
| 274 | + * %true if group stop completion should be notified to the parent, %false | |
| 275 | + * otherwise. | |
| 276 | + */ | |
| 277 | +static bool task_participate_group_stop(struct task_struct *task) | |
| 278 | +{ | |
| 279 | + struct signal_struct *sig = task->signal; | |
| 280 | + bool consume = task->group_stop & GROUP_STOP_CONSUME; | |
| 281 | + | |
| 282 | + WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING)); | |
| 283 | + | |
| 284 | + task_clear_group_stop_pending(task); | |
| 285 | + | |
| 286 | + if (!consume) | |
| 287 | + return false; | |
| 288 | + | |
| 289 | + if (!WARN_ON_ONCE(sig->group_stop_count == 0)) | |
| 290 | + sig->group_stop_count--; | |
| 291 | + | |
| 292 | + /* | |
| 293 | + * Tell the caller to notify completion iff we are entering into a | |
| 294 | + * fresh group stop. Read comment in do_signal_stop() for details. | |
| 295 | + */ | |
| 296 | + if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) { | |
| 297 | + sig->flags = SIGNAL_STOP_STOPPED; | |
| 298 | + return true; | |
| 299 | + } | |
| 300 | + return false; | |
| 301 | +} | |
| 302 | + | |
| 226 | 303 | /* |
| 227 | 304 | * allocate a new signal queue record |
| 228 | 305 | * - this may be called without locks if and only if t == current, otherwise an |
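
task_participate_group_stop()'s CONSUME bookkeeping reduces to: each participating task decrements group_stop_count once, and only the task that drives the count to zero while the group is not already stopped reports completion. A toy model, plain C and illustrative only:

    #include <stdbool.h>
    #include <stdio.h>

    struct sig_model {
            int group_stop_count;
            bool stop_stopped;                  /* SIGNAL_STOP_STOPPED */
    };

    static bool participate(struct sig_model *sig, bool consume)
    {
            if (!consume)                       /* no GROUP_STOP_CONSUME */
                    return false;
            if (sig->group_stop_count > 0)
                    sig->group_stop_count--;
            if (!sig->group_stop_count && !sig->stop_stopped) {
                    sig->stop_stopped = true;
                    return true;                /* notify the parent */
            }
            return false;
    }

    int main(void)
    {
            struct sig_model sig = { .group_stop_count = 3 };

            for (int i = 0; i < 3; i++)         /* only the last returns 1 */
                    printf("task %d notifies: %d\n", i,
                           participate(&sig, true));
            return 0;
    }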
| ... | ... | @@ -527,7 +604,7 @@ |
| 527 | 604 | * is to alert stop-signal processing code when another |
| 528 | 605 | * processor has come along and cleared the flag. |
| 529 | 606 | */ |
| 530 | - tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; | |
| 607 | + current->group_stop |= GROUP_STOP_DEQUEUED; | |
| 531 | 608 | } |
| 532 | 609 | if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { |
| 533 | 610 | /* |
| ... | ... | @@ -727,34 +804,14 @@ |
| 727 | 804 | } else if (sig == SIGCONT) { |
| 728 | 805 | unsigned int why; |
| 729 | 806 | /* |
| 730 | - * Remove all stop signals from all queues, | |
| 731 | - * and wake all threads. | |
| 807 | + * Remove all stop signals from all queues, wake all threads. | |
| 732 | 808 | */ |
| 733 | 809 | rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending); |
| 734 | 810 | t = p; |
| 735 | 811 | do { |
| 736 | - unsigned int state; | |
| 812 | + task_clear_group_stop_pending(t); | |
| 737 | 813 | rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); |
| 738 | - /* | |
| 739 | - * If there is a handler for SIGCONT, we must make | |
| 740 | - * sure that no thread returns to user mode before | |
| 741 | - * we post the signal, in case it was the only | |
| 742 | - * thread eligible to run the signal handler--then | |
| 743 | - * it must not do anything between resuming and | |
| 744 | - * running the handler. With the TIF_SIGPENDING | |
| 745 | - * flag set, the thread will pause and acquire the | |
| 746 | - * siglock that we hold now and until we've queued | |
| 747 | - * the pending signal. | |
| 748 | - * | |
| 749 | - * Wake up the stopped thread _after_ setting | |
| 750 | - * TIF_SIGPENDING | |
| 751 | - */ | |
| 752 | - state = __TASK_STOPPED; | |
| 753 | - if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) { | |
| 754 | - set_tsk_thread_flag(t, TIF_SIGPENDING); | |
| 755 | - state |= TASK_INTERRUPTIBLE; | |
| 756 | - } | |
| 757 | - wake_up_state(t, state); | |
| 814 | + wake_up_state(t, __TASK_STOPPED); | |
| 758 | 815 | } while_each_thread(p, t); |
| 759 | 816 | |
| 760 | 817 | /* |
| ... | ... | @@ -780,13 +837,6 @@ |
| 780 | 837 | signal->flags = why | SIGNAL_STOP_CONTINUED; |
| 781 | 838 | signal->group_stop_count = 0; |
| 782 | 839 | signal->group_exit_code = 0; |
| 783 | - } else { | |
| 784 | - /* | |
| 785 | - * We are not stopped, but there could be a stop | |
| 786 | - * signal in the middle of being processed after | |
| 787 | - * being removed from the queue. Clear that too. | |
| 788 | - */ | |
| 789 | - signal->flags &= ~SIGNAL_STOP_DEQUEUED; | |
| 790 | 840 | } |
| 791 | 841 | } |
| 792 | 842 | |
| ... | ... | @@ -875,6 +925,7 @@ |
| 875 | 925 | signal->group_stop_count = 0; |
| 876 | 926 | t = p; |
| 877 | 927 | do { |
| 928 | + task_clear_group_stop_pending(t); | |
| 878 | 929 | sigaddset(&t->pending.signal, SIGKILL); |
| 879 | 930 | signal_wake_up(t, 1); |
| 880 | 931 | } while_each_thread(p, t); |
| ... | ... | @@ -1109,6 +1160,7 @@ |
| 1109 | 1160 | p->signal->group_stop_count = 0; |
| 1110 | 1161 | |
| 1111 | 1162 | while_each_thread(p, t) { |
| 1163 | + task_clear_group_stop_pending(t); | |
| 1112 | 1164 | count++; |
| 1113 | 1165 | |
| 1114 | 1166 | /* Don't bother with already dead threads */ |
| ... | ... | @@ -1536,16 +1588,30 @@ |
| 1536 | 1588 | return ret; |
| 1537 | 1589 | } |
| 1538 | 1590 | |
| 1539 | -static void do_notify_parent_cldstop(struct task_struct *tsk, int why) | |
| 1591 | +/** | |
| 1592 | + * do_notify_parent_cldstop - notify parent of stopped/continued state change | |
| 1593 | + * @tsk: task reporting the state change | |
| 1594 | + * @for_ptracer: the notification is for ptracer | |
| 1595 | + * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report | |
| 1596 | + * | |
| 1597 | + * Notify @tsk's parent that the stopped/continued state has changed. If | |
| 1598 | + * @for_ptracer is %false, @tsk's group leader notifies its real parent. | |
| 1599 | + * If %true, @tsk reports to @tsk->parent which should be the ptracer. | |
| 1600 | + * | |
| 1601 | + * CONTEXT: | |
| 1602 | + * Must be called with tasklist_lock at least read locked. | |
| 1603 | + */ | |
| 1604 | +static void do_notify_parent_cldstop(struct task_struct *tsk, | |
| 1605 | + bool for_ptracer, int why) | |
| 1540 | 1606 | { |
| 1541 | 1607 | struct siginfo info; |
| 1542 | 1608 | unsigned long flags; |
| 1543 | 1609 | struct task_struct *parent; |
| 1544 | 1610 | struct sighand_struct *sighand; |
| 1545 | 1611 | |
| 1546 | - if (task_ptrace(tsk)) | |
| 1612 | + if (for_ptracer) { | |
| 1547 | 1613 | parent = tsk->parent; |
| 1548 | - else { | |
| 1614 | + } else { | |
| 1549 | 1615 | tsk = tsk->group_leader; |
| 1550 | 1616 | parent = tsk->real_parent; |
| 1551 | 1617 | } |
| ... | ... | @@ -1621,6 +1687,15 @@ |
| 1621 | 1687 | } |
| 1622 | 1688 | |
| 1623 | 1689 | /* |
| 1690 | + * Test whether the target task of the usual cldstop notification - the | |
| 1691 | + * real_parent of @child - is in the same group as the ptracer. | |
| 1692 | + */ | |
| 1693 | +static bool real_parent_is_ptracer(struct task_struct *child) | |
| 1694 | +{ | |
| 1695 | + return same_thread_group(child->parent, child->real_parent); | |
| 1696 | +} | |
| 1697 | + | |
| 1698 | +/* | |
| 1624 | 1699 | * This must be called with current->sighand->siglock held. |
| 1625 | 1700 | * |
| 1626 | 1701 | * This should be the path for all ptrace stops. |
| ... | ... | @@ -1631,10 +1706,12 @@ |
| 1631 | 1706 | * If we actually decide not to stop at all because the tracer |
| 1632 | 1707 | * is gone, we keep current->exit_code unless clear_code. |
| 1633 | 1708 | */ |
| 1634 | -static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) | |
| 1709 | +static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) | |
| 1635 | 1710 | __releases(&current->sighand->siglock) |
| 1636 | 1711 | __acquires(&current->sighand->siglock) |
| 1637 | 1712 | { |
| 1713 | + bool gstop_done = false; | |
| 1714 | + | |
| 1638 | 1715 | if (arch_ptrace_stop_needed(exit_code, info)) { |
| 1639 | 1716 | /* |
| 1640 | 1717 | * The arch code has something special to do before a |
| ... | ... | @@ -1655,22 +1732,50 @@ |
| 1655 | 1732 | } |
| 1656 | 1733 | |
| 1657 | 1734 | /* |
| 1658 | - * If there is a group stop in progress, | |
| 1659 | - * we must participate in the bookkeeping. | |
| 1735 | + * If @why is CLD_STOPPED, we're trapping to participate in a group | |
| 1736 | + * stop. Do the bookkeeping. Note that if SIGCONT was delivered | |
| 1737 | + * while siglock was released for the arch hook, PENDING could be | |
| 1738 | + * clear now. We act as if SIGCONT is received after TASK_TRACED | |
| 1739 | + * is entered - ignore it. | |
| 1660 | 1740 | */ |
| 1661 | - if (current->signal->group_stop_count > 0) | |
| 1662 | - --current->signal->group_stop_count; | |
| 1741 | + if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING)) | |
| 1742 | + gstop_done = task_participate_group_stop(current); | |
| 1663 | 1743 | |
| 1664 | 1744 | current->last_siginfo = info; |
| 1665 | 1745 | current->exit_code = exit_code; |
| 1666 | 1746 | |
| 1667 | - /* Let the debugger run. */ | |
| 1668 | - __set_current_state(TASK_TRACED); | |
| 1747 | + /* | |
| 1748 | + * TRACED should be visible before TRAPPING is cleared; otherwise, | |
| 1749 | + * the tracer might fail do_wait(). | |
| 1750 | + */ | |
| 1751 | + set_current_state(TASK_TRACED); | |
| 1752 | + | |
| 1753 | + /* | |
| 1754 | + * We're committing to trapping. Clearing GROUP_STOP_TRAPPING and | |
| 1755 | + * transition to TASK_TRACED should be atomic with respect to | |
| 1756 | + * siglock. This should be done after the arch hook as siglock is | |
| 1757 | + * released and regrabbed across it. | |
| 1758 | + */ | |
| 1759 | + task_clear_group_stop_trapping(current); | |
| 1760 | + | |
| 1669 | 1761 | spin_unlock_irq(&current->sighand->siglock); |
| 1670 | 1762 | read_lock(&tasklist_lock); |
| 1671 | 1763 | if (may_ptrace_stop()) { |
| 1672 | - do_notify_parent_cldstop(current, CLD_TRAPPED); | |
| 1673 | 1764 | /* |
| 1765 | + * Notify parents of the stop. | |
| 1766 | + * | |
| 1767 | + * While ptraced, there are two parents - the ptracer and | |
| 1768 | + * the real_parent of the group_leader. The ptracer should | |
| 1769 | + * know about every stop while the real parent is only | |
| 1770 | + * interested in the completion of group stop. The states | |
| 1771 | + * for the two don't interact with each other. Notify | |
| 1772 | + * separately unless they're gonna be duplicates. | |
| 1773 | + */ | |
| 1774 | + do_notify_parent_cldstop(current, true, why); | |
| 1775 | + if (gstop_done && !real_parent_is_ptracer(current)) | |
| 1776 | + do_notify_parent_cldstop(current, false, why); | |
| 1777 | + | |
| 1778 | + /* | |
| 1674 | 1779 | * Don't want to allow preemption here, because |
| 1675 | 1780 | * sys_ptrace() needs this task to be inactive. |
| 1676 | 1781 | * |
| ... | ... | @@ -1684,7 +1789,16 @@ |
| 1684 | 1789 | /* |
| 1685 | 1790 | * By the time we got the lock, our tracer went away. |
| 1686 | 1791 | * Don't drop the lock yet, another tracer may come. |
| 1792 | + * | |
| 1793 | + * If @gstop_done, the ptracer went away between group stop | |
| 1794 | + * completion and here. During detach, it would have set | |
| 1795 | + * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED | |
| 1796 | + * in do_signal_stop() on return, so notifying the real | |
| 1797 | + * parent of the group stop completion is enough. | |
| 1687 | 1798 | */ |
| 1799 | + if (gstop_done) | |
| 1800 | + do_notify_parent_cldstop(current, false, why); | |
| 1801 | + | |
| 1688 | 1802 | __set_current_state(TASK_RUNNING); |
| 1689 | 1803 | if (clear_code) |
| 1690 | 1804 | current->exit_code = 0; |
| ... | ... | @@ -1728,7 +1842,7 @@ |
| 1728 | 1842 | |
| 1729 | 1843 | /* Let the debugger run. */ |
| 1730 | 1844 | spin_lock_irq(&current->sighand->siglock); |
| 1731 | - ptrace_stop(exit_code, 1, &info); | |
| 1845 | + ptrace_stop(exit_code, CLD_TRAPPED, 1, &info); | |
| 1732 | 1846 | spin_unlock_irq(&current->sighand->siglock); |
| 1733 | 1847 | } |
| 1734 | 1848 | |
| ... | ... | @@ -1741,66 +1855,115 @@ |
| 1741 | 1855 | static int do_signal_stop(int signr) |
| 1742 | 1856 | { |
| 1743 | 1857 | struct signal_struct *sig = current->signal; |
| 1744 | - int notify; | |
| 1745 | 1858 | |
| 1746 | - if (!sig->group_stop_count) { | |
| 1859 | + if (!(current->group_stop & GROUP_STOP_PENDING)) { | |
| 1860 | + unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME; | |
| 1747 | 1861 | struct task_struct *t; |
| 1748 | 1862 | |
| 1749 | - if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) || | |
| 1863 | + /* signr will be recorded in task->group_stop for retries */ | |
| 1864 | + WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK); | |
| 1865 | + | |
| 1866 | + if (!likely(current->group_stop & GROUP_STOP_DEQUEUED) || | |
| 1750 | 1867 | unlikely(signal_group_exit(sig))) |
| 1751 | 1868 | return 0; |
| 1752 | 1869 | /* |
| 1753 | - * There is no group stop already in progress. | |
| 1754 | - * We must initiate one now. | |
| 1870 | + * There is no group stop already in progress. We must | |
| 1871 | + * initiate one now. | |
| 1872 | + * | |
| 1873 | + * While ptraced, a task may be resumed while group stop is | |
| 1874 | + * still in effect and then receive a stop signal and | |
| 1875 | + * initiate another group stop. This deviates from the | |
| 1876 | + * usual behavior as two consecutive stop signals can't | |
| 1877 | + * cause two group stops when !ptraced. That is why we | |
| 1878 | + * also check !task_is_stopped(t) below. | |
| 1879 | + * | |
| 1880 | + * The condition can be distinguished by testing whether | |
| 1881 | + * SIGNAL_STOP_STOPPED is already set. Don't generate | |
| 1882 | + * group_exit_code in such case. | |
| 1883 | + * | |
| 1884 | + * This is not necessary for SIGNAL_STOP_CONTINUED because | |
| 1885 | + * an intervening stop signal is required to cause two | |
| 1886 | + * continued events regardless of ptrace. | |
| 1755 | 1887 | */ |
| 1756 | - sig->group_exit_code = signr; | |
| 1888 | + if (!(sig->flags & SIGNAL_STOP_STOPPED)) | |
| 1889 | + sig->group_exit_code = signr; | |
| 1890 | + else | |
| 1891 | + WARN_ON_ONCE(!task_ptrace(current)); | |
| 1757 | 1892 | |
| 1893 | + current->group_stop &= ~GROUP_STOP_SIGMASK; | |
| 1894 | + current->group_stop |= signr | gstop; | |
| 1758 | 1895 | sig->group_stop_count = 1; |
| 1759 | - for (t = next_thread(current); t != current; t = next_thread(t)) | |
| 1896 | + for (t = next_thread(current); t != current; | |
| 1897 | + t = next_thread(t)) { | |
| 1898 | + t->group_stop &= ~GROUP_STOP_SIGMASK; | |
| 1760 | 1899 | /* |
| 1761 | 1900 | * Setting state to TASK_STOPPED for a group |
| 1762 | 1901 | * stop is always done with the siglock held, |
| 1763 | 1902 | * so this check has no races. |
| 1764 | 1903 | */ |
| 1765 | - if (!(t->flags & PF_EXITING) && | |
| 1766 | - !task_is_stopped_or_traced(t)) { | |
| 1904 | + if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) { | |
| 1905 | + t->group_stop |= signr | gstop; | |
| 1767 | 1906 | sig->group_stop_count++; |
| 1768 | 1907 | signal_wake_up(t, 0); |
| 1769 | 1908 | } |
| 1909 | + } | |
| 1770 | 1910 | } |
| 1771 | - /* | |
| 1772 | - * If there are no other threads in the group, or if there is | |
| 1773 | - * a group stop in progress and we are the last to stop, report | |
| 1774 | - * to the parent. When ptraced, every thread reports itself. | |
| 1775 | - */ | |
| 1776 | - notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0; | |
| 1777 | - notify = tracehook_notify_jctl(notify, CLD_STOPPED); | |
| 1778 | - /* | |
| 1779 | - * tracehook_notify_jctl() can drop and reacquire siglock, so | |
| 1780 | - * we keep ->group_stop_count != 0 before the call. If SIGCONT | |
| 1781 | - * or SIGKILL comes in between ->group_stop_count == 0. | |
| 1782 | - */ | |
| 1783 | - if (sig->group_stop_count) { | |
| 1784 | - if (!--sig->group_stop_count) | |
| 1785 | - sig->flags = SIGNAL_STOP_STOPPED; | |
| 1786 | - current->exit_code = sig->group_exit_code; | |
| 1911 | +retry: | |
| 1912 | + if (likely(!task_ptrace(current))) { | |
| 1913 | + int notify = 0; | |
| 1914 | + | |
| 1915 | + /* | |
| 1916 | + * If there are no other threads in the group, or if there | |
| 1917 | + * is a group stop in progress and we are the last to stop, | |
| 1918 | + * report to the parent. | |
| 1919 | + */ | |
| 1920 | + if (task_participate_group_stop(current)) | |
| 1921 | + notify = CLD_STOPPED; | |
| 1922 | + | |
| 1787 | 1923 | __set_current_state(TASK_STOPPED); |
| 1924 | + spin_unlock_irq(&current->sighand->siglock); | |
| 1925 | + | |
| 1926 | + /* | |
| 1927 | + * Notify the parent of the group stop completion. Because | |
| 1928 | + * we're not holding either the siglock or tasklist_lock | |
| 1929 | + * here, ptracer may attach in between; however, this is for | |
| 1930 | + * group stop and should always be delivered to the real | |
| 1931 | + * parent of the group leader. The new ptracer will get | |
| 1932 | + * its notification when this task transitions into | |
| 1933 | + * TASK_TRACED. | |
| 1934 | + */ | |
| 1935 | + if (notify) { | |
| 1936 | + read_lock(&tasklist_lock); | |
| 1937 | + do_notify_parent_cldstop(current, false, notify); | |
| 1938 | + read_unlock(&tasklist_lock); | |
| 1939 | + } | |
| 1940 | + | |
| 1941 | + /* Now we don't run again until woken by SIGCONT or SIGKILL */ | |
| 1942 | + schedule(); | |
| 1943 | + | |
| 1944 | + spin_lock_irq(&current->sighand->siglock); | |
| 1945 | + } else { | |
| 1946 | + ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK, | |
| 1947 | + CLD_STOPPED, 0, NULL); | |
| 1948 | + current->exit_code = 0; | |
| 1788 | 1949 | } |
| 1789 | - spin_unlock_irq(&current->sighand->siglock); | |
| 1790 | 1950 | |
| 1791 | - if (notify) { | |
| 1792 | - read_lock(&tasklist_lock); | |
| 1793 | - do_notify_parent_cldstop(current, notify); | |
| 1794 | - read_unlock(&tasklist_lock); | |
| 1951 | + /* | |
| 1952 | + * GROUP_STOP_PENDING could be set if another group stop has | |
| 1953 | + * started since being woken up or ptrace wants us to transit | |
| 1954 | + * between TASK_STOPPED and TRACED. Retry group stop. | |
| 1955 | + */ | |
| 1956 | + if (current->group_stop & GROUP_STOP_PENDING) { | |
| 1957 | + WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK)); | |
| 1958 | + goto retry; | |
| 1795 | 1959 | } |
| 1796 | 1960 | |
| 1797 | - /* Now we don't run again until woken by SIGCONT or SIGKILL */ | |
| 1798 | - do { | |
| 1799 | - schedule(); | |
| 1800 | - } while (try_to_freeze()); | |
| 1961 | + /* PTRACE_ATTACH might have raced with task killing, clear trapping */ | |
| 1962 | + task_clear_group_stop_trapping(current); | |
| 1801 | 1963 | |
| 1964 | + spin_unlock_irq(&current->sighand->siglock); | |
| 1965 | + | |
| 1802 | 1966 | tracehook_finish_jctl(); |
| 1803 | - current->exit_code = 0; | |
| 1804 | 1967 | |
| 1805 | 1968 | return 1; |
| 1806 | 1969 | } |
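
The heart of the rewrite is the retry loop: a woken task re-enters the stop whenever GROUP_STOP_PENDING has been reinstated, which is how PTRACE_ATTACH converts a task sleeping in TASK_STOPPED into a ptrace trap and PTRACE_DETACH converts it back. A heavily stubbed, compileable skeleton of that control flow (illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    #define GROUP_STOP_PENDING (1 << 16)

    struct task_model {
            unsigned int group_stop;
            bool ptraced;
    };

    static void plain_stop(void)  { puts("TASK_STOPPED, schedule()"); }
    static void ptrace_trap(void) { puts("ptrace_stop(CLD_STOPPED, ...)"); }

    /* one wakeup per stop; PENDING is reinstated by attach/detach,
     * cleared by SIGCONT or a ptrace resume request */
    static void wakeup(struct task_model *t, bool reinstate, bool ptraced)
    {
            t->ptraced = ptraced;
            t->group_stop = reinstate ? GROUP_STOP_PENDING : 0;
    }

    static void do_signal_stop_skeleton(struct task_model *t)
    {
            int round = 0;
    retry:
            if (!t->ptraced)
                    plain_stop();
            else
                    ptrace_trap();

            /* simulate: the first wakeup is a PTRACE_ATTACH, which
             * reinstates PENDING so the task re-stops as a tracee */
            wakeup(t, round == 0, true);
            round++;

            if (t->group_stop & GROUP_STOP_PENDING)
                    goto retry;
    }

    int main(void)
    {
            struct task_model t = { .group_stop = GROUP_STOP_PENDING };

            do_signal_stop_skeleton(&t);
            return 0;
    }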
| ... | ... | @@ -1814,7 +1977,7 @@ |
| 1814 | 1977 | ptrace_signal_deliver(regs, cookie); |
| 1815 | 1978 | |
| 1816 | 1979 | /* Let the debugger run. */ |
| 1817 | - ptrace_stop(signr, 0, info); | |
| 1980 | + ptrace_stop(signr, CLD_TRAPPED, 0, info); | |
| 1818 | 1981 | |
| 1819 | 1982 | /* We're back. Did the debugger cancel the sig? */ |
| 1820 | 1983 | signr = current->exit_code; |
| ... | ... | @@ -1869,18 +2032,36 @@ |
| 1869 | 2032 | * the CLD_ si_code into SIGNAL_CLD_MASK bits. |
| 1870 | 2033 | */ |
| 1871 | 2034 | if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { |
| 1872 | - int why = (signal->flags & SIGNAL_STOP_CONTINUED) | |
| 1873 | - ? CLD_CONTINUED : CLD_STOPPED; | |
| 2035 | + struct task_struct *leader; | |
| 2036 | + int why; | |
| 2037 | + | |
| 2038 | + if (signal->flags & SIGNAL_CLD_CONTINUED) | |
| 2039 | + why = CLD_CONTINUED; | |
| 2040 | + else | |
| 2041 | + why = CLD_STOPPED; | |
| 2042 | + | |
| 1874 | 2043 | signal->flags &= ~SIGNAL_CLD_MASK; |
| 1875 | 2044 | |
| 1876 | - why = tracehook_notify_jctl(why, CLD_CONTINUED); | |
| 1877 | 2045 | spin_unlock_irq(&sighand->siglock); |
| 1878 | 2046 | |
| 1879 | - if (why) { | |
| 1880 | - read_lock(&tasklist_lock); | |
| 1881 | - do_notify_parent_cldstop(current->group_leader, why); | |
| 1882 | - read_unlock(&tasklist_lock); | |
| 1883 | - } | |
| 2047 | + /* | |
| 2048 | + * Notify the parent that we're continuing. This event is | |
| 2049 | + * always per-process and doesn't make a whole lot of sense | |
| 2050 | + * for ptracers, who shouldn't consume the state via | |
| 2051 | + * wait(2) either, but, for backward compatibility, notify | |
| 2052 | + * the ptracer of the group leader too unless it's gonna be | |
| 2053 | + * a duplicate. | |
| 2054 | + */ | |
| 2055 | + read_lock(&tasklist_lock); | |
| 2056 | + | |
| 2057 | + do_notify_parent_cldstop(current, false, why); | |
| 2058 | + | |
| 2059 | + leader = current->group_leader; | |
| 2060 | + if (task_ptrace(leader) && !real_parent_is_ptracer(leader)) | |
| 2061 | + do_notify_parent_cldstop(leader, true, why); | |
| 2062 | + | |
| 2063 | + read_unlock(&tasklist_lock); | |
| 2064 | + | |
| 1884 | 2065 | goto relock; |
| 1885 | 2066 | } |
| 1886 | 2067 | |
| ... | ... | @@ -1897,8 +2078,8 @@ |
| 1897 | 2078 | if (unlikely(signr != 0)) |
| 1898 | 2079 | ka = return_ka; |
| 1899 | 2080 | else { |
| 1900 | - if (unlikely(signal->group_stop_count > 0) && | |
| 1901 | - do_signal_stop(0)) | |
| 2081 | + if (unlikely(current->group_stop & | |
| 2082 | + GROUP_STOP_PENDING) && do_signal_stop(0)) | |
| 1902 | 2083 | goto relock; |
| 1903 | 2084 | |
| 1904 | 2085 | signr = dequeue_signal(current, &current->blocked, |
| ... | ... | @@ -2045,17 +2226,19 @@ |
| 2045 | 2226 | if (!signal_pending(t) && !(t->flags & PF_EXITING)) |
| 2046 | 2227 | recalc_sigpending_and_wake(t); |
| 2047 | 2228 | |
| 2048 | - if (unlikely(tsk->signal->group_stop_count) && | |
| 2049 | - !--tsk->signal->group_stop_count) { | |
| 2050 | - tsk->signal->flags = SIGNAL_STOP_STOPPED; | |
| 2051 | - group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED); | |
| 2052 | - } | |
| 2229 | + if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) && | |
| 2230 | + task_participate_group_stop(tsk)) | |
| 2231 | + group_stop = CLD_STOPPED; | |
| 2053 | 2232 | out: |
| 2054 | 2233 | spin_unlock_irq(&tsk->sighand->siglock); |
| 2055 | 2234 | |
| 2235 | + /* | |
| 2236 | + * If group stop has completed, deliver the notification. This | |
| 2237 | + * should always go to the real parent of the group leader. | |
| 2238 | + */ | |
| 2056 | 2239 | if (unlikely(group_stop)) { |
| 2057 | 2240 | read_lock(&tasklist_lock); |
| 2058 | - do_notify_parent_cldstop(tsk, group_stop); | |
| 2241 | + do_notify_parent_cldstop(tsk, false, group_stop); | |
| 2059 | 2242 | read_unlock(&tasklist_lock); |
| 2060 | 2243 | } |
| 2061 | 2244 | } |