Commit a8f072c1d624a627b67f2ace2f0c25d856ef4e54

Authored by Tejun Heo
Committed by Oleg Nesterov
1 parent 0b1007c357

job control: rename signal->group_stop and flags to jobctl and update them

signal->group_stop currently hosts mostly group stop related flags;
however, it's gonna be used for wider purposes and the GROUP_STOP_
flag prefix becomes confusing.  Rename signal->group_stop to
signal->jobctl and rename all GROUP_STOP_* flags to JOBCTL_*.

Bit position macros JOBCTL_*_BIT are defined and JOBCTL_* flags are
defined in terms of them to allow using bitops later.

While at it, reassign JOBCTL_TRAPPING to bit 22 to better accommodate
future additions.

This doesn't cause any functional change.

-v2: JOBCTL_*_BIT macros added as suggested by Linus.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>

Showing 4 changed files with 67 additions and 60 deletions Side-by-side Diff

... ... @@ -1772,7 +1772,7 @@
1772 1772  
1773 1773 t = start;
1774 1774 do {
1775   - task_clear_group_stop_pending(t);
  1775 + task_clear_jobctl_stop_pending(t);
1776 1776 if (t != current && t->mm) {
1777 1777 sigaddset(&t->pending.signal, SIGKILL);
1778 1778 signal_wake_up(t, 1);
include/linux/sched.h
... ... @@ -1282,7 +1282,7 @@
1282 1282 int exit_state;
1283 1283 int exit_code, exit_signal;
1284 1284 int pdeath_signal; /* The signal sent when the parent dies */
1285   - unsigned int group_stop; /* GROUP_STOP_*, siglock protected */
  1285 + unsigned int jobctl; /* JOBCTL_*, siglock protected */
1286 1286 /* ??? */
1287 1287 unsigned int personality;
1288 1288 unsigned did_exec:1;
1289 1289  
1290 1290  
... ... @@ -1803,15 +1803,21 @@
1803 1803 #define used_math() tsk_used_math(current)
1804 1804  
1805 1805 /*
1806   - * task->group_stop flags
  1806 + * task->jobctl flags
1807 1807 */
1808   -#define GROUP_STOP_SIGMASK 0xffff /* signr of the last group stop */
1809   -#define GROUP_STOP_PENDING (1 << 16) /* task should stop for group stop */
1810   -#define GROUP_STOP_CONSUME (1 << 17) /* consume group stop count */
1811   -#define GROUP_STOP_TRAPPING (1 << 18) /* switching from STOPPED to TRACED */
1812   -#define GROUP_STOP_DEQUEUED (1 << 19) /* stop signal dequeued */
  1808 +#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */
1813 1809  
1814   -extern void task_clear_group_stop_pending(struct task_struct *task);
  1810 +#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */
  1811 +#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */
  1812 +#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */
  1813 +#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
  1814 +
  1815 +#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT)
  1816 +#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT)
  1817 +#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT)
  1818 +#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT)
  1819 +
  1820 +extern void task_clear_jobctl_stop_pending(struct task_struct *task);
1815 1821  
1816 1822 #ifdef CONFIG_PREEMPT_RCU
1817 1823  
... ... @@ -77,13 +77,13 @@
77 77 spin_lock(&child->sighand->siglock);
78 78  
79 79 /*
80   - * Reinstate GROUP_STOP_PENDING if group stop is in effect and
  80 + * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
81 81 * @child isn't dead.
82 82 */
83 83 if (!(child->flags & PF_EXITING) &&
84 84 (child->signal->flags & SIGNAL_STOP_STOPPED ||
85 85 child->signal->group_stop_count))
86   - child->group_stop |= GROUP_STOP_PENDING;
  86 + child->jobctl |= JOBCTL_STOP_PENDING;
87 87  
88 88 /*
89 89 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
... ... @@ -91,7 +91,7 @@
91 91 * is in TASK_TRACED; otherwise, we might unduly disrupt
92 92 * TASK_KILLABLE sleeps.
93 93 */
94   - if (child->group_stop & GROUP_STOP_PENDING || task_is_traced(child))
  94 + if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
95 95 signal_wake_up(child, task_is_traced(child));
96 96  
97 97 spin_unlock(&child->sighand->siglock);
... ... @@ -226,7 +226,7 @@
226 226 spin_lock(&task->sighand->siglock);
227 227  
228 228 /*
229   - * If the task is already STOPPED, set GROUP_STOP_PENDING and
  229 + * If the task is already STOPPED, set JOBCTL_STOP_PENDING and
230 230 * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
231 231 * will be cleared if the child completes the transition or any
232 232 * event which clears the group stop states happens. We'll wait
... ... @@ -243,7 +243,7 @@
243 243 * in and out of STOPPED are protected by siglock.
244 244 */
245 245 if (task_is_stopped(task)) {
246   - task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING;
  246 + task->jobctl |= JOBCTL_STOP_PENDING | JOBCTL_TRAPPING;
247 247 signal_wake_up(task, 1);
248 248 }
249 249  
... ... @@ -257,7 +257,7 @@
257 257 out:
258 258 if (!retval)
259 259 wait_event(current->signal->wait_chldexit,
260   - !(task->group_stop & GROUP_STOP_TRAPPING));
  260 + !(task->jobctl & JOBCTL_TRAPPING));
261 261 return retval;
262 262 }
263 263  
... ... @@ -124,7 +124,7 @@
124 124  
125 125 static int recalc_sigpending_tsk(struct task_struct *t)
126 126 {
127   - if ((t->group_stop & GROUP_STOP_PENDING) ||
  127 + if ((t->jobctl & JOBCTL_STOP_PENDING) ||
128 128 PENDING(&t->pending, &t->blocked) ||
129 129 PENDING(&t->signal->shared_pending, &t->blocked)) {
130 130 set_tsk_thread_flag(t, TIF_SIGPENDING);
131 131  
132 132  
133 133  
134 134  
... ... @@ -224,27 +224,28 @@
224 224 }
225 225  
226 226 /**
227   - * task_clear_group_stop_trapping - clear group stop trapping bit
  227 + * task_clear_jobctl_trapping - clear jobctl trapping bit
228 228 * @task: target task
229 229 *
230   - * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us. Clear it
231   - * and wake up the ptracer. Note that we don't need any further locking.
232   - * @task->siglock guarantees that @task->parent points to the ptracer.
  230 + * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
  231 + * Clear it and wake up the ptracer. Note that we don't need any further
  232 + * locking. @task->siglock guarantees that @task->parent points to the
  233 + * ptracer.
233 234 *
234 235 * CONTEXT:
235 236 * Must be called with @task->sighand->siglock held.
236 237 */
237   -static void task_clear_group_stop_trapping(struct task_struct *task)
  238 +static void task_clear_jobctl_trapping(struct task_struct *task)
238 239 {
239   - if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) {
240   - task->group_stop &= ~GROUP_STOP_TRAPPING;
  240 + if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
  241 + task->jobctl &= ~JOBCTL_TRAPPING;
241 242 __wake_up_sync_key(&task->parent->signal->wait_chldexit,
242 243 TASK_UNINTERRUPTIBLE, 1, task);
243 244 }
244 245 }
245 246  
246 247 /**
247   - * task_clear_group_stop_pending - clear pending group stop
  248 + * task_clear_jobctl_stop_pending - clear pending group stop
248 249 * @task: target task
249 250 *
250 251 * Clear group stop states for @task.
251 252  
252 253  
253 254  
... ... @@ -252,19 +253,19 @@
252 253 * CONTEXT:
253 254 * Must be called with @task->sighand->siglock held.
254 255 */
255   -void task_clear_group_stop_pending(struct task_struct *task)
  256 +void task_clear_jobctl_stop_pending(struct task_struct *task)
256 257 {
257   - task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME |
258   - GROUP_STOP_DEQUEUED);
  258 + task->jobctl &= ~(JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME |
  259 + JOBCTL_STOP_DEQUEUED);
259 260 }
260 261  
261 262 /**
262 263 * task_participate_group_stop - participate in a group stop
263 264 * @task: task participating in a group stop
264 265 *
265   - * @task has GROUP_STOP_PENDING set and is participating in a group stop.
  266 + * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
266 267 * Group stop states are cleared and the group stop count is consumed if
267   - * %GROUP_STOP_CONSUME was set. If the consumption completes the group
  268 + * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
268 269 * stop, the appropriate %SIGNAL_* flags are set.
269 270 *
270 271 * CONTEXT:
271 272  
272 273  
... ... @@ -277,11 +278,11 @@
277 278 static bool task_participate_group_stop(struct task_struct *task)
278 279 {
279 280 struct signal_struct *sig = task->signal;
280   - bool consume = task->group_stop & GROUP_STOP_CONSUME;
  281 + bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
281 282  
282   - WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));
  283 + WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
283 284  
284   - task_clear_group_stop_pending(task);
  285 + task_clear_jobctl_stop_pending(task);
285 286  
286 287 if (!consume)
287 288 return false;
... ... @@ -604,7 +605,7 @@
604 605 * is to alert stop-signal processing code when another
605 606 * processor has come along and cleared the flag.
606 607 */
607   - current->group_stop |= GROUP_STOP_DEQUEUED;
  608 + current->jobctl |= JOBCTL_STOP_DEQUEUED;
608 609 }
609 610 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
610 611 /*
... ... @@ -809,7 +810,7 @@
809 810 rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
810 811 t = p;
811 812 do {
812   - task_clear_group_stop_pending(t);
  813 + task_clear_jobctl_stop_pending(t);
813 814 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
814 815 wake_up_state(t, __TASK_STOPPED);
815 816 } while_each_thread(p, t);
... ... @@ -925,7 +926,7 @@
925 926 signal->group_stop_count = 0;
926 927 t = p;
927 928 do {
928   - task_clear_group_stop_pending(t);
  929 + task_clear_jobctl_stop_pending(t);
929 930 sigaddset(&t->pending.signal, SIGKILL);
930 931 signal_wake_up(t, 1);
931 932 } while_each_thread(p, t);
... ... @@ -1160,7 +1161,7 @@
1160 1161 p->signal->group_stop_count = 0;
1161 1162  
1162 1163 while_each_thread(p, t) {
1163   - task_clear_group_stop_pending(t);
  1164 + task_clear_jobctl_stop_pending(t);
1164 1165 count++;
1165 1166  
1166 1167 /* Don't bother with already dead threads */
... ... @@ -1738,7 +1739,7 @@
1738 1739 * clear now. We act as if SIGCONT is received after TASK_TRACED
1739 1740 * is entered - ignore it.
1740 1741 */
1741   - if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING))
  1742 + if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1742 1743 gstop_done = task_participate_group_stop(current);
1743 1744  
1744 1745 current->last_siginfo = info;
1745 1746  
1746 1747  
... ... @@ -1751,12 +1752,12 @@
1751 1752 set_current_state(TASK_TRACED);
1752 1753  
1753 1754 /*
1754   - * We're committing to trapping. Clearing GROUP_STOP_TRAPPING and
  1755 + * We're committing to trapping. Clearing JOBCTL_TRAPPING and
1755 1756 * transition to TASK_TRACED should be atomic with respect to
1756   - * siglock. This hsould be done after the arch hook as siglock is
  1757 + * siglock. This should be done after the arch hook as siglock is
1757 1758 * released and regrabbed across it.
1758 1759 */
1759   - task_clear_group_stop_trapping(current);
  1760 + task_clear_jobctl_trapping(current);
1760 1761  
1761 1762 spin_unlock_irq(&current->sighand->siglock);
1762 1763 read_lock(&tasklist_lock);
... ... @@ -1792,9 +1793,9 @@
1792 1793 *
1793 1794 * If @gstop_done, the ptracer went away between group stop
1794 1795 * completion and here. During detach, it would have set
1795   - * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED
1796   - * in do_signal_stop() on return, so notifying the real
1797   - * parent of the group stop completion is enough.
  1796 + * JOBCTL_STOP_PENDING on us and we'll re-enter
  1797 + * TASK_STOPPED in do_signal_stop() on return, so notifying
  1798 + * the real parent of the group stop completion is enough.
1798 1799 */
1799 1800 if (gstop_done)
1800 1801 do_notify_parent_cldstop(current, false, why);
1801 1802  
1802 1803  
... ... @@ -1856,14 +1857,14 @@
1856 1857 {
1857 1858 struct signal_struct *sig = current->signal;
1858 1859  
1859   - if (!(current->group_stop & GROUP_STOP_PENDING)) {
1860   - unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
  1860 + if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
  1861 + unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
1861 1862 struct task_struct *t;
1862 1863  
1863   - /* signr will be recorded in task->group_stop for retries */
1864   - WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK);
  1864 + /* signr will be recorded in task->jobctl for retries */
  1865 + WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
1865 1866  
1866   - if (!likely(current->group_stop & GROUP_STOP_DEQUEUED) ||
  1867 + if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
1867 1868 unlikely(signal_group_exit(sig)))
1868 1869 return 0;
1869 1870 /*
1870 1871  
1871 1872  
... ... @@ -1890,19 +1891,19 @@
1890 1891 else
1891 1892 WARN_ON_ONCE(!task_ptrace(current));
1892 1893  
1893   - current->group_stop &= ~GROUP_STOP_SIGMASK;
1894   - current->group_stop |= signr | gstop;
  1894 + current->jobctl &= ~JOBCTL_STOP_SIGMASK;
  1895 + current->jobctl |= signr | gstop;
1895 1896 sig->group_stop_count = 1;
1896 1897 for (t = next_thread(current); t != current;
1897 1898 t = next_thread(t)) {
1898   - t->group_stop &= ~GROUP_STOP_SIGMASK;
  1899 + t->jobctl &= ~JOBCTL_STOP_SIGMASK;
1899 1900 /*
1900 1901 * Setting state to TASK_STOPPED for a group
1901 1902 * stop is always done with the siglock held,
1902 1903 * so this check has no races.
1903 1904 */
1904 1905 if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
1905   - t->group_stop |= signr | gstop;
  1906 + t->jobctl |= signr | gstop;
1906 1907 sig->group_stop_count++;
1907 1908 signal_wake_up(t, 0);
1908 1909 }
1909 1910  
1910 1911  
1911 1912  
... ... @@ -1943,23 +1944,23 @@
1943 1944  
1944 1945 spin_lock_irq(&current->sighand->siglock);
1945 1946 } else {
1946   - ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK,
  1947 + ptrace_stop(current->jobctl & JOBCTL_STOP_SIGMASK,
1947 1948 CLD_STOPPED, 0, NULL);
1948 1949 current->exit_code = 0;
1949 1950 }
1950 1951  
1951 1952 /*
1952   - * GROUP_STOP_PENDING could be set if another group stop has
  1953 + * JOBCTL_STOP_PENDING could be set if another group stop has
1953 1954 * started since being woken up or ptrace wants us to transit
1954 1955 * between TASK_STOPPED and TRACED. Retry group stop.
1955 1956 */
1956   - if (current->group_stop & GROUP_STOP_PENDING) {
1957   - WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK));
  1957 + if (current->jobctl & JOBCTL_STOP_PENDING) {
  1958 + WARN_ON_ONCE(!(current->jobctl & JOBCTL_STOP_SIGMASK));
1958 1959 goto retry;
1959 1960 }
1960 1961  
1961 1962 /* PTRACE_ATTACH might have raced with task killing, clear trapping */
1962   - task_clear_group_stop_trapping(current);
  1963 + task_clear_jobctl_trapping(current);
1963 1964  
1964 1965 spin_unlock_irq(&current->sighand->siglock);
1965 1966  
... ... @@ -2078,8 +2079,8 @@
2078 2079 if (unlikely(signr != 0))
2079 2080 ka = return_ka;
2080 2081 else {
2081   - if (unlikely(current->group_stop &
2082   - GROUP_STOP_PENDING) && do_signal_stop(0))
  2082 + if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
  2083 + do_signal_stop(0))
2083 2084 goto relock;
2084 2085  
2085 2086 signr = dequeue_signal(current, &current->blocked,
... ... @@ -2253,7 +2254,7 @@
2253 2254 signotset(&unblocked);
2254 2255 retarget_shared_pending(tsk, &unblocked);
2255 2256  
2256   - if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) &&
  2257 + if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2257 2258 task_participate_group_stop(tsk))
2258 2259 group_stop = CLD_STOPPED;
2259 2260 out: