Commit 53c8f9f199b239668e6b1a907735ee323a0d1ccd
1 parent: 06d984737b
Exists in master and in 6 other branches
make do_notify_parent() return bool
- change do_notify_parent() to return a boolean, true if the task
  should be reaped because its parent ignores SIGCHLD.

- update the only caller which checks the returned value,
  exit_notify().

This temporarily uglifies exit_notify() even more; it will be cleaned
up by the next change.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Tejun Heo <tj@kernel.org>
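For readers skimming the diff below, here is a minimal user-space sketch of
the new contract. The struct, field, and state names (struct task,
parent_ignores_sigchld) are illustrative stand-ins, not the kernel's code,
and all of the real function's signal delivery and locking is elided:

#include <stdbool.h>

enum exit_state { EXIT_ZOMBIE, EXIT_DEAD };

struct task {
        bool parent_ignores_sigchld;    /* SIGCHLD handler is SIG_IGN,
                                           or sa_flags has SA_NOCLDWAIT */
        enum exit_state exit_state;
};

/* New contract: instead of returning an int that the caller compares
 * against a magic value, return true when the exiting task should be
 * reaped at once because its parent ignores SIGCHLD. */
static bool do_notify_parent(struct task *tsk, int sig)
{
        (void)sig;      /* the real function delivers sig to the parent */
        return tsk->parent_ignores_sigchld;
}

/* Rough shape of the one caller that checks the result; per the commit
 * message, the real exit_notify() stays messier until the follow-up
 * cleanup. */
static void exit_notify(struct task *tsk, int sig)
{
        bool autoreap = do_notify_parent(tsk, sig);

        tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
}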
Showing 3 changed files with 17 additions and 13 deletions
include/linux/sched.h
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * cloning flags:
 */
#define CSIGNAL 0x000000ff /* signal mask to be sent at exit */
#define CLONE_VM 0x00000100 /* set if VM shared between processes */
#define CLONE_FS 0x00000200 /* set if fs info shared between processes */
#define CLONE_FILES 0x00000400 /* set if open files shared between processes */
#define CLONE_SIGHAND 0x00000800 /* set if signal handlers and blocked signals shared */
#define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */
#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */
#define CLONE_THREAD 0x00010000 /* Same thread group? */
#define CLONE_NEWNS 0x00020000 /* New namespace group? */
#define CLONE_SYSVSEM 0x00040000 /* share system V SEM_UNDO semantics */
#define CLONE_SETTLS 0x00080000 /* create a new TLS for the child */
#define CLONE_PARENT_SETTID 0x00100000 /* set the TID in the parent */
#define CLONE_CHILD_CLEARTID 0x00200000 /* clear the TID in the child */
#define CLONE_DETACHED 0x00400000 /* Unused, ignored */
#define CLONE_UNTRACED 0x00800000 /* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */
/* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state)
   and is now available for re-use. */
#define CLONE_NEWUTS 0x04000000 /* New utsname group? */
#define CLONE_NEWIPC 0x08000000 /* New ipcs */
#define CLONE_NEWUSER 0x10000000 /* New user namespace */
#define CLONE_NEWPID 0x20000000 /* New pid namespace */
#define CLONE_NEWNET 0x40000000 /* New network namespace */
#define CLONE_IO 0x80000000 /* Clone io context */

/*
 * Scheduling policies
 */
#define SCHED_NORMAL 0
#define SCHED_FIFO 1
#define SCHED_RR 2
#define SCHED_BATCH 3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE 5
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK 0x40000000

#ifdef __KERNEL__

struct sched_param {
        int sched_priority;
};

#include <asm/param.h> /* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>

#include <asm/processor.h>

struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

/*
 * These are the constant used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[]; /* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT 11 /* nr of bits of precision */
#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */
#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
#define EXP_5 2014 /* 1/exp(5sec/5min) */
#define EXP_15 2037 /* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
        load *= exp; \
        load += n*(FIXED_1-exp); \
        load >>= FSHIFT;

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void);


extern void calc_global_load(unsigned long ticks);

extern unsigned long get_parent_ip(unsigned long addr);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#else
static inline void
proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
}
static inline void proc_sched_set_task(struct task_struct *p)
{
}
static inline void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
}
#endif

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING 0
#define TASK_INTERRUPTIBLE 1
#define TASK_UNINTERRUPTIBLE 2
#define __TASK_STOPPED 4
#define __TASK_TRACED 8
/* in tsk->exit_state */
#define EXIT_ZOMBIE 16
#define EXIT_DEAD 32
/* in tsk->state again */
#define TASK_DEAD 64
#define TASK_WAKEKILL 128
#define TASK_WAKING 256
#define TASK_STATE_MAX 512

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"

extern char ___assert_task_state[1 - 2*!!(
                sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
                     TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
                     __TASK_TRACED)

#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
#define task_is_dead(task) ((task)->exit_state != 0)
#define task_is_stopped_or_traced(task) \
        ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
        ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
         (task->flags & PF_FREEZING) == 0)

#define __set_task_state(tsk, state_value) \
        do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value) \
        set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *      set_current_state(TASK_UNINTERRUPTIBLE);
 *      if (do_i_need_to_sleep())
 *              schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value) \
        do { current->state = (state_value); } while (0)
#define set_current_state(state_value) \
        set_mb(current->state, (state_value))

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(int cpu);

extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern void select_nohz_load_balancer(int stop_tick);
extern int get_nohz_timer_target(void);
#else
static inline void select_nohz_load_balancer(int stop_tick) { }
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
        show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
                                  void __user *buffer,
                                  size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_check_count;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
                                         void __user *buffer,
                                         size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched __attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define MAX_SCHEDULE_TIMEOUT LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);

struct nsproxy;
struct user_namespace;

/*
 * Default maximum number of active map areas, this limits the number of vmas
 * per mm struct. Users can overwrite this number by sysctl but there is a
 * problem.
 *
 * When a program's coredump is generated as ELF format, a section is created
 * per a vma. In ELF, the number of sections is represented in unsigned short.
 * This means the number of sections should be smaller than 65535 at coredump.
 * Because the kernel adds some informative sections to a image of program at
 * generating coredump, we need some margin. The number of extra sections is
 * 1-3 now and depends on arch. We use "5" as safe margin, here.
 */
#define MAPCOUNT_ELF_CORE_MARGIN (5)
#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;

#include <linux/aio.h>

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
                       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
                               unsigned long len, unsigned long pgoff,
                               unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif


extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);

/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE 0 /* core dump is permitted */
#define MMF_DUMP_SECURELY 1 /* core file is readable only by root */

#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE 2
#define MMF_DUMP_ANON_SHARED 3
#define MMF_DUMP_MAPPED_PRIVATE 4
#define MMF_DUMP_MAPPED_SHARED 5
#define MMF_DUMP_ELF_HEADERS 6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED 8

#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS 7
#define MMF_DUMP_FILTER_MASK \
        (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
        ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
         (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF 0
#endif
/* leave room for more dump flags */
#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */

#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)

struct sighand_struct {
        atomic_t count;
        struct k_sigaction action[_NSIG];
        spinlock_t siglock;
        wait_queue_head_t signalfd_wqh;
};

struct pacct_struct {
        int ac_flag;
        long ac_exitcode;
        unsigned long ac_mem;
        cputime_t ac_utime, ac_stime;
        unsigned long ac_minflt, ac_majflt;
};

struct cpu_itimer {
        cputime_t expires;
        cputime_t incr;
        u32 error;
        u32 incr_error;
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime: time spent in user mode, in &cputime_t units
 * @stime: time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are
 * tracked for threads and thread groups. Most things considering
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
        cputime_t utime;
        cputime_t stime;
        unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp stime
#define virt_exp utime
#define sched_exp sum_exec_runtime

#define INIT_CPUTIME \
        (struct task_cputime) { \
                .utime = cputime_zero, \
                .stime = cputime_zero, \
                .sum_exec_runtime = 0, \
        }

/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
 *
 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 * before the scheduler is active -- see should_resched().
 */
#define INIT_PREEMPT_COUNT (1 + PREEMPT_ACTIVE)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime: thread group interval timers.
 * @running: non-zero when there are timers running and
 *           @cputime receives updates.
 * @lock: lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
        struct task_cputime cputime;
        int running;
        spinlock_t lock;
};

#include <linux/rwsem.h>
struct autogroup;

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
        atomic_t sigcnt;
        atomic_t live;
        int nr_threads;

        wait_queue_head_t wait_chldexit; /* for wait4() */

        /* current thread group signal load-balancing target: */
        struct task_struct *curr_target;

        /* shared signal handling: */
        struct sigpending shared_pending;

        /* thread group exit support */
        int group_exit_code;
        /* overloaded:
         * - notify group_exit_task when ->count is equal to notify_count
         * - everyone except group_exit_task is stopped during signal delivery
         *   of fatal signals, group_exit_task processes the signal.
         */
        int notify_count;
        struct task_struct *group_exit_task;

        /* thread group stop support, overloads group_exit_code too */
        int group_stop_count;
        unsigned int flags; /* see SIGNAL_* flags below */

        /* POSIX.1b Interval Timers */
        struct list_head posix_timers;

        /* ITIMER_REAL timer for the process */
        struct hrtimer real_timer;
        struct pid *leader_pid;
        ktime_t it_real_incr;

        /*
         * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
         * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
         * values are defined to 0 and 1 respectively
         */
        struct cpu_itimer it[2];

        /*
         * Thread group totals for process CPU timers.
         * See thread_group_cputimer(), et al, for details.
         */
        struct thread_group_cputimer cputimer;

        /* Earliest-expiration cache. */
        struct task_cputime cputime_expires;

        struct list_head cpu_timers[3];

        struct pid *tty_old_pgrp;

        /* boolean value for session group leader */
        int leader;

        struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
        struct autogroup *autogroup;
#endif
        /*
         * Cumulative resource counters for dead threads in the group,
         * and for reaped dead child processes forked by this group.
         * Live threads maintain their own counters and add to these
         * in __exit_signal, except for the group leader.
         */
        cputime_t utime, stime, cutime, cstime;
        cputime_t gtime;
        cputime_t cgtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
        cputime_t prev_utime, prev_stime;
#endif
        unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
        unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
        unsigned long inblock, oublock, cinblock, coublock;
        unsigned long maxrss, cmaxrss;
        struct task_io_accounting ioac;

        /*
         * Cumulative ns of schedule CPU time fo dead threads in the
         * group, not including a zombie group leader, (This only differs
         * from jiffies_to_ns(utime + stime) if sched_clock uses something
         * other than jiffies.)
         */
        unsigned long long sum_sched_runtime;

        /*
         * We don't bother to synchronize most readers of this at all,
         * because there is no reader checking a limit that actually needs
         * to get both rlim_cur and rlim_max atomically, and either one
         * alone is a single word that can safely be read normally.
         * getrlimit/setrlimit use task_lock(current->group_leader) to
         * protect this instead of the siglock, because they really
         * have no need to disable irqs.
         */
        struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
        struct pacct_struct pacct; /* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
        struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
        unsigned audit_tty;
        struct tty_audit_buf *tty_audit_buf;
#endif
#ifdef CONFIG_CGROUPS
        /*
         * The threadgroup_fork_lock prevents threads from forking with
         * CLONE_THREAD while held for writing. Use this for fork-sensitive
         * threadgroup-wide operations. It's taken for reading in fork.c in
         * copy_process().
         * Currently only needed write-side by cgroups.
         */
        struct rw_semaphore threadgroup_fork_lock;
#endif

        int oom_adj;            /* OOM kill score adjustment (bit shift) */
        int oom_score_adj;      /* OOM kill score adjustment */
        int oom_score_adj_min;  /* OOM kill score adjustment minimum value.
                                 * Only settable by CAP_SYS_RESOURCE. */

        struct mutex cred_guard_mutex;  /* guard against foreign influences on
                                         * credential calculations
                                         * (notably. ptrace) */
};

/* Context switch must be unlocked if interrupts are to be enabled */
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
# define __ARCH_WANT_UNLOCKED_CTXSW
#endif

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED 0x00000010
#define SIGNAL_CLD_CONTINUED 0x00000020
#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
        return (sig->flags & SIGNAL_GROUP_EXIT) ||
               (sig->group_exit_task != NULL);
}

684 | /* | 684 | /* |
685 | * Some day this will be a full-fledged user tracking system.. | 685 | * Some day this will be a full-fledged user tracking system.. |
686 | */ | 686 | */ |
687 | struct user_struct { | 687 | struct user_struct { |
688 | atomic_t __count; /* reference count */ | 688 | atomic_t __count; /* reference count */ |
689 | atomic_t processes; /* How many processes does this user have? */ | 689 | atomic_t processes; /* How many processes does this user have? */ |
690 | atomic_t files; /* How many open files does this user have? */ | 690 | atomic_t files; /* How many open files does this user have? */ |
691 | atomic_t sigpending; /* How many pending signals does this user have? */ | 691 | atomic_t sigpending; /* How many pending signals does this user have? */ |
692 | #ifdef CONFIG_INOTIFY_USER | 692 | #ifdef CONFIG_INOTIFY_USER |
693 | atomic_t inotify_watches; /* How many inotify watches does this user have? */ | 693 | atomic_t inotify_watches; /* How many inotify watches does this user have? */ |
694 | atomic_t inotify_devs; /* How many inotify devs does this user have opened? */ | 694 | atomic_t inotify_devs; /* How many inotify devs does this user have opened? */ |
695 | #endif | 695 | #endif |
696 | #ifdef CONFIG_FANOTIFY | 696 | #ifdef CONFIG_FANOTIFY |
697 | atomic_t fanotify_listeners; | 697 | atomic_t fanotify_listeners; |
698 | #endif | 698 | #endif |
699 | #ifdef CONFIG_EPOLL | 699 | #ifdef CONFIG_EPOLL |
700 | atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ | 700 | atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ |
701 | #endif | 701 | #endif |
702 | #ifdef CONFIG_POSIX_MQUEUE | 702 | #ifdef CONFIG_POSIX_MQUEUE |
703 | /* protected by mq_lock */ | 703 | /* protected by mq_lock */ |
704 | unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ | 704 | unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ |
705 | #endif | 705 | #endif |
706 | unsigned long locked_shm; /* How many pages of mlocked shm ? */ | 706 | unsigned long locked_shm; /* How many pages of mlocked shm ? */ |
707 | 707 | ||
708 | #ifdef CONFIG_KEYS | 708 | #ifdef CONFIG_KEYS |
709 | struct key *uid_keyring; /* UID specific keyring */ | 709 | struct key *uid_keyring; /* UID specific keyring */ |
710 | struct key *session_keyring; /* UID's default session keyring */ | 710 | struct key *session_keyring; /* UID's default session keyring */ |
711 | #endif | 711 | #endif |
712 | 712 | ||
713 | /* Hash table maintenance information */ | 713 | /* Hash table maintenance information */ |
714 | struct hlist_node uidhash_node; | 714 | struct hlist_node uidhash_node; |
715 | uid_t uid; | 715 | uid_t uid; |
716 | struct user_namespace *user_ns; | 716 | struct user_namespace *user_ns; |
717 | 717 | ||
718 | #ifdef CONFIG_PERF_EVENTS | 718 | #ifdef CONFIG_PERF_EVENTS |
719 | atomic_long_t locked_vm; | 719 | atomic_long_t locked_vm; |
720 | #endif | 720 | #endif |
721 | }; | 721 | }; |
722 | 722 | ||
723 | extern int uids_sysfs_init(void); | 723 | extern int uids_sysfs_init(void); |
724 | 724 | ||
725 | extern struct user_struct *find_user(uid_t); | 725 | extern struct user_struct *find_user(uid_t); |
726 | 726 | ||
727 | extern struct user_struct root_user; | 727 | extern struct user_struct root_user; |
728 | #define INIT_USER (&root_user) | 728 | #define INIT_USER (&root_user) |
729 | 729 | ||
730 | 730 | ||
731 | struct backing_dev_info; | 731 | struct backing_dev_info; |
732 | struct reclaim_state; | 732 | struct reclaim_state; |
733 | 733 | ||
734 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) | 734 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) |
735 | struct sched_info { | 735 | struct sched_info { |
736 | /* cumulative counters */ | 736 | /* cumulative counters */ |
737 | unsigned long pcount; /* # of times run on this cpu */ | 737 | unsigned long pcount; /* # of times run on this cpu */ |
738 | unsigned long long run_delay; /* time spent waiting on a runqueue */ | 738 | unsigned long long run_delay; /* time spent waiting on a runqueue */ |
739 | 739 | ||
740 | /* timestamps */ | 740 | /* timestamps */ |
741 | unsigned long long last_arrival, /* when we last ran on a cpu */ | 741 | unsigned long long last_arrival, /* when we last ran on a cpu */ |
742 | last_queued; /* when we were last queued to run */ | 742 | last_queued; /* when we were last queued to run */ |
743 | }; | 743 | }; |
744 | #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */ | 744 | #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */ |
745 | 745 | ||
746 | #ifdef CONFIG_TASK_DELAY_ACCT | 746 | #ifdef CONFIG_TASK_DELAY_ACCT |
747 | struct task_delay_info { | 747 | struct task_delay_info { |
748 | spinlock_t lock; | 748 | spinlock_t lock; |
749 | unsigned int flags; /* Private per-task flags */ | 749 | unsigned int flags; /* Private per-task flags */ |
750 | 750 | ||
751 | /* For each stat XXX, add the following, aligned appropriately: | 751 | /* For each stat XXX, add the following, aligned appropriately: |
752 | * | 752 | * |
753 | * struct timespec XXX_start, XXX_end; | 753 | * struct timespec XXX_start, XXX_end; |
754 | * u64 XXX_delay; | 754 | * u64 XXX_delay; |
755 | * u32 XXX_count; | 755 | * u32 XXX_count; |
756 | * | 756 | * |
757 | * Atomicity of updates to XXX_delay and XXX_count is protected by the | 757 | * Atomicity of updates to XXX_delay and XXX_count is protected by the |
758 | * single lock above (split into XXX_lock if contention is an issue). | 758 | * single lock above (split into XXX_lock if contention is an issue). |
759 | */ | 759 | */ |
760 | 760 | ||
761 | /* | 761 | /* |
762 | * XXX_count is incremented on every XXX operation, the delay | 762 | * XXX_count is incremented on every XXX operation, the delay |
763 | * associated with the operation is added to XXX_delay. | 763 | * associated with the operation is added to XXX_delay. |
764 | * XXX_delay contains the accumulated delay time in nanoseconds. | 764 | * XXX_delay contains the accumulated delay time in nanoseconds. |
765 | */ | 765 | */ |
766 | struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */ | 766 | struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */ |
767 | u64 blkio_delay; /* wait for sync block io completion */ | 767 | u64 blkio_delay; /* wait for sync block io completion */ |
768 | u64 swapin_delay; /* wait for swapin block io completion */ | 768 | u64 swapin_delay; /* wait for swapin block io completion */ |
769 | u32 blkio_count; /* total count of the number of sync block */ | 769 | u32 blkio_count; /* total count of the number of sync block */ |
770 | /* io operations performed */ | 770 | /* io operations performed */ |
771 | u32 swapin_count; /* total count of the number of swapin block */ | 771 | u32 swapin_count; /* total count of the number of swapin block */ |
772 | /* io operations performed */ | 772 | /* io operations performed */ |
773 | 773 | ||
774 | struct timespec freepages_start, freepages_end; | 774 | struct timespec freepages_start, freepages_end; |
775 | u64 freepages_delay; /* wait for memory reclaim */ | 775 | u64 freepages_delay; /* wait for memory reclaim */ |
776 | u32 freepages_count; /* total count of memory reclaim */ | 776 | u32 freepages_count; /* total count of memory reclaim */ |
777 | }; | 777 | }; |
778 | #endif /* CONFIG_TASK_DELAY_ACCT */ | 778 | #endif /* CONFIG_TASK_DELAY_ACCT */ |
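
The XXX pattern in the comment inside task_delay_info is mechanical: stamp a start time, stamp an end time, then fold the difference into the u64/u32 pair under the lock. A minimal sketch of one update cycle for a hypothetical stat named "xxx", assuming the do_posix_clock_monotonic_gettime(), timespec_sub() and timespec_to_ns() helpers available in this era of the kernel:

	/* Illustrative only: one update cycle for a hypothetical stat "xxx" */
	static void xxx_delay_end(struct task_delay_info *delays)
	{
		struct timespec ts;
		unsigned long flags;

		do_posix_clock_monotonic_gettime(&delays->xxx_end);
		ts = timespec_sub(delays->xxx_end, delays->xxx_start);

		spin_lock_irqsave(&delays->lock, flags);
		delays->xxx_delay += timespec_to_ns(&ts);	/* accumulated ns */
		delays->xxx_count++;				/* one more operation */
		spin_unlock_irqrestore(&delays->lock, flags);
	}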
779 | 779 | ||
780 | static inline int sched_info_on(void) | 780 | static inline int sched_info_on(void) |
781 | { | 781 | { |
782 | #ifdef CONFIG_SCHEDSTATS | 782 | #ifdef CONFIG_SCHEDSTATS |
783 | return 1; | 783 | return 1; |
784 | #elif defined(CONFIG_TASK_DELAY_ACCT) | 784 | #elif defined(CONFIG_TASK_DELAY_ACCT) |
785 | extern int delayacct_on; | 785 | extern int delayacct_on; |
786 | return delayacct_on; | 786 | return delayacct_on; |
787 | #else | 787 | #else |
788 | return 0; | 788 | return 0; |
789 | #endif | 789 | #endif |
790 | } | 790 | } |
791 | 791 | ||
792 | enum cpu_idle_type { | 792 | enum cpu_idle_type { |
793 | CPU_IDLE, | 793 | CPU_IDLE, |
794 | CPU_NOT_IDLE, | 794 | CPU_NOT_IDLE, |
795 | CPU_NEWLY_IDLE, | 795 | CPU_NEWLY_IDLE, |
796 | CPU_MAX_IDLE_TYPES | 796 | CPU_MAX_IDLE_TYPES |
797 | }; | 797 | }; |
798 | 798 | ||
799 | /* | 799 | /* |
800 | * Increase resolution of nice-level calculations for 64-bit architectures. | 800 | * Increase resolution of nice-level calculations for 64-bit architectures. |
801 | * The extra resolution improves shares distribution and load balancing of | 801 | * The extra resolution improves shares distribution and load balancing of |
802 | * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup | 802 | * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup |
803 | * hierarchies, especially on larger systems. This is not a user-visible change | 803 | * hierarchies, especially on larger systems. This is not a user-visible change |
804 | * and does not change the user interface for setting shares/weights. | 804 | * and does not change the user interface for setting shares/weights. |
805 | * | 805 | * |
806 | * We increase resolution only if we have enough bits to allow this increased | 806 | * We increase resolution only if we have enough bits to allow this increased |
807 | * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution | 807 | * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution |
808 | * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the | 808 | * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the |
809 | * increased costs. | 809 | * increased costs. |
810 | */ | 810 | */ |
811 | #if BITS_PER_LONG > 32 | 811 | #if BITS_PER_LONG > 32 |
812 | # define SCHED_LOAD_RESOLUTION 10 | 812 | # define SCHED_LOAD_RESOLUTION 10 |
813 | # define scale_load(w) ((w) << SCHED_LOAD_RESOLUTION) | 813 | # define scale_load(w) ((w) << SCHED_LOAD_RESOLUTION) |
814 | # define scale_load_down(w) ((w) >> SCHED_LOAD_RESOLUTION) | 814 | # define scale_load_down(w) ((w) >> SCHED_LOAD_RESOLUTION) |
815 | #else | 815 | #else |
816 | # define SCHED_LOAD_RESOLUTION 0 | 816 | # define SCHED_LOAD_RESOLUTION 0 |
817 | # define scale_load(w) (w) | 817 | # define scale_load(w) (w) |
818 | # define scale_load_down(w) (w) | 818 | # define scale_load_down(w) (w) |
819 | #endif | 819 | #endif |
820 | 820 | ||
821 | #define SCHED_LOAD_SHIFT (10 + SCHED_LOAD_RESOLUTION) | 821 | #define SCHED_LOAD_SHIFT (10 + SCHED_LOAD_RESOLUTION) |
822 | #define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT) | 822 | #define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT) |
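
On a 64-bit build the load weights therefore carry ten extra bits of fixed-point precision internally. Working through the arithmetic with the scheduler's nice-0 weight of 1024:

	/* On a 64-bit build (BITS_PER_LONG > 32), with the nice-0 weight of 1024: */
	scale_load(1024)	 /* == 1024 << 10   == 1048576, internal fixed point */
	scale_load_down(1048576) /* == 1048576 >> 10 == 1024, user-visible again    */
	SCHED_LOAD_SHIFT	 /* == 10 + 10       == 20                           */
	SCHED_LOAD_SCALE	 /* == 1L << 20      == 1048576                      */

On 32-bit builds both macros are identity operations, so SCHED_LOAD_SCALE stays at 1 << 10 == 1024.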
823 | 823 | ||
824 | /* | 824 | /* |
825 | * Increase resolution of cpu_power calculations | 825 | * Increase resolution of cpu_power calculations |
826 | */ | 826 | */ |
827 | #define SCHED_POWER_SHIFT 10 | 827 | #define SCHED_POWER_SHIFT 10 |
828 | #define SCHED_POWER_SCALE (1L << SCHED_POWER_SHIFT) | 828 | #define SCHED_POWER_SCALE (1L << SCHED_POWER_SHIFT) |
829 | 829 | ||
830 | /* | 830 | /* |
831 | * sched-domains (multiprocessor balancing) declarations: | 831 | * sched-domains (multiprocessor balancing) declarations: |
832 | */ | 832 | */ |
833 | #ifdef CONFIG_SMP | 833 | #ifdef CONFIG_SMP |
834 | #define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */ | 834 | #define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */ |
835 | #define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ | 835 | #define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ |
836 | #define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ | 836 | #define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ |
837 | #define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ | 837 | #define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ |
838 | #define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ | 838 | #define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ |
839 | #define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ | 839 | #define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ |
840 | #define SD_PREFER_LOCAL 0x0040 /* Prefer to keep tasks local to this domain */ | 840 | #define SD_PREFER_LOCAL 0x0040 /* Prefer to keep tasks local to this domain */ |
841 | #define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */ | 841 | #define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */ |
842 | #define SD_POWERSAVINGS_BALANCE 0x0100 /* Balance for power savings */ | 842 | #define SD_POWERSAVINGS_BALANCE 0x0100 /* Balance for power savings */ |
843 | #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ | 843 | #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ |
844 | #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ | 844 | #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ |
845 | #define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ | 845 | #define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ |
846 | #define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ | 846 | #define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ |
847 | 847 | ||
848 | enum powersavings_balance_level { | 848 | enum powersavings_balance_level { |
849 | POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */ | 849 | POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */ |
850 | POWERSAVINGS_BALANCE_BASIC, /* Fill one thread/core/package | 850 | POWERSAVINGS_BALANCE_BASIC, /* Fill one thread/core/package |
851 | * first for long running threads | 851 | * first for long running threads |
852 | */ | 852 | */ |
853 | POWERSAVINGS_BALANCE_WAKEUP, /* Also bias task wakeups to semi-idle | 853 | POWERSAVINGS_BALANCE_WAKEUP, /* Also bias task wakeups to semi-idle |
854 | * cpu package for power savings | 854 | * cpu package for power savings |
855 | */ | 855 | */ |
856 | MAX_POWERSAVINGS_BALANCE_LEVELS | 856 | MAX_POWERSAVINGS_BALANCE_LEVELS |
857 | }; | 857 | }; |
858 | 858 | ||
859 | extern int sched_mc_power_savings, sched_smt_power_savings; | 859 | extern int sched_mc_power_savings, sched_smt_power_savings; |
860 | 860 | ||
861 | static inline int sd_balance_for_mc_power(void) | 861 | static inline int sd_balance_for_mc_power(void) |
862 | { | 862 | { |
863 | if (sched_smt_power_savings) | 863 | if (sched_smt_power_savings) |
864 | return SD_POWERSAVINGS_BALANCE; | 864 | return SD_POWERSAVINGS_BALANCE; |
865 | 865 | ||
866 | if (!sched_mc_power_savings) | 866 | if (!sched_mc_power_savings) |
867 | return SD_PREFER_SIBLING; | 867 | return SD_PREFER_SIBLING; |
868 | 868 | ||
869 | return 0; | 869 | return 0; |
870 | } | 870 | } |
871 | 871 | ||
872 | static inline int sd_balance_for_package_power(void) | 872 | static inline int sd_balance_for_package_power(void) |
873 | { | 873 | { |
874 | if (sched_mc_power_savings | sched_smt_power_savings) | 874 | if (sched_mc_power_savings | sched_smt_power_savings) |
875 | return SD_POWERSAVINGS_BALANCE; | 875 | return SD_POWERSAVINGS_BALANCE; |
876 | 876 | ||
877 | return SD_PREFER_SIBLING; | 877 | return SD_PREFER_SIBLING; |
878 | } | 878 | } |
879 | 879 | ||
880 | extern int __weak arch_sd_sibiling_asym_packing(void); | 880 | extern int __weak arch_sd_sibiling_asym_packing(void); |
881 | 881 | ||
882 | /* | 882 | /* |
883 | * Optimise SD flags for power savings: | 883 | * Optimise SD flags for power savings: |
884 | * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings. | 884 | * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings. |
885 | * Keep the default SD flags if sched_{smt,mc}_power_savings == 0. | 885 | * Keep the default SD flags if sched_{smt,mc}_power_savings == 0. |
886 | */ | 886 | */ |
887 | 887 | ||
888 | static inline int sd_power_saving_flags(void) | 888 | static inline int sd_power_saving_flags(void) |
889 | { | 889 | { |
890 | if (sched_mc_power_savings | sched_smt_power_savings) | 890 | if (sched_mc_power_savings | sched_smt_power_savings) |
891 | return SD_BALANCE_NEWIDLE; | 891 | return SD_BALANCE_NEWIDLE; |
892 | 892 | ||
893 | return 0; | 893 | return 0; |
894 | } | 894 | } |
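
These three helpers feed the flags word of a sched_domain when a topology level is initialized. A hedged sketch of the composition (the surrounding flag choices are illustrative, not a real topology level):

	/* Sketch: composing a domain's SD_* flags at init time */
	int flags = SD_LOAD_BALANCE
		  | SD_BALANCE_FORK
		  | SD_BALANCE_EXEC
		  | sd_balance_for_mc_power()	/* 0, SD_POWERSAVINGS_BALANCE or SD_PREFER_SIBLING */
		  | sd_power_saving_flags();	/* adds SD_BALANCE_NEWIDLE when power saving is on */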
895 | 895 | ||
896 | struct sched_group { | 896 | struct sched_group { |
897 | struct sched_group *next; /* Must be a circular list */ | 897 | struct sched_group *next; /* Must be a circular list */ |
898 | atomic_t ref; | 898 | atomic_t ref; |
899 | 899 | ||
900 | /* | 900 | /* |
901 | * CPU power of this group, SCHED_LOAD_SCALE being max power for a | 901 | * CPU power of this group, SCHED_LOAD_SCALE being max power for a |
902 | * single CPU. | 902 | * single CPU. |
903 | */ | 903 | */ |
904 | unsigned int cpu_power, cpu_power_orig; | 904 | unsigned int cpu_power, cpu_power_orig; |
905 | unsigned int group_weight; | 905 | unsigned int group_weight; |
906 | 906 | ||
907 | /* | 907 | /* |
908 | * The CPUs this group covers. | 908 | * The CPUs this group covers. |
909 | * | 909 | * |
910 | * NOTE: this field is variable length. (Allocated dynamically | 910 | * NOTE: this field is variable length. (Allocated dynamically |
911 | * by attaching extra space to the end of the structure, | 911 | * by attaching extra space to the end of the structure, |
912 | * depending on how many CPUs the kernel has booted up with) | 912 | * depending on how many CPUs the kernel has booted up with) |
913 | */ | 913 | */ |
914 | unsigned long cpumask[0]; | 914 | unsigned long cpumask[0]; |
915 | }; | 915 | }; |
916 | 916 | ||
917 | static inline struct cpumask *sched_group_cpus(struct sched_group *sg) | 917 | static inline struct cpumask *sched_group_cpus(struct sched_group *sg) |
918 | { | 918 | { |
919 | return to_cpumask(sg->cpumask); | 919 | return to_cpumask(sg->cpumask); |
920 | } | 920 | } |
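
Because cpumask[0] is a flexible trailing member, a sched_group is always allocated with extra space for the boot-time mask and then addressed through sched_group_cpus(). A minimal sketch, assuming kzalloc() and cpumask_size() (both present in this kernel):

	/* Sketch: allocating a sched_group with its trailing cpumask */
	struct sched_group *sg;

	sg = kzalloc(sizeof(*sg) + cpumask_size(), GFP_KERNEL);
	if (sg)
		cpumask_copy(sched_group_cpus(sg), cpu_possible_mask);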
921 | 921 | ||
922 | struct sched_domain_attr { | 922 | struct sched_domain_attr { |
923 | int relax_domain_level; | 923 | int relax_domain_level; |
924 | }; | 924 | }; |
925 | 925 | ||
926 | #define SD_ATTR_INIT (struct sched_domain_attr) { \ | 926 | #define SD_ATTR_INIT (struct sched_domain_attr) { \ |
927 | .relax_domain_level = -1, \ | 927 | .relax_domain_level = -1, \ |
928 | } | 928 | } |
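
SD_ATTR_INIT is a compound literal, so it works both as an initializer and on the right-hand side of a later assignment:

	struct sched_domain_attr attr = SD_ATTR_INIT;	/* relax_domain_level == -1 */
	attr = SD_ATTR_INIT;				/* reset later if needed */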
929 | 929 | ||
930 | extern int sched_domain_level_max; | 930 | extern int sched_domain_level_max; |
931 | 931 | ||
932 | struct sched_domain { | 932 | struct sched_domain { |
933 | /* These fields must be set up */ | 933 | /* These fields must be set up */ |
934 | struct sched_domain *parent; /* top domain must be null terminated */ | 934 | struct sched_domain *parent; /* top domain must be null terminated */ |
935 | struct sched_domain *child; /* bottom domain must be null terminated */ | 935 | struct sched_domain *child; /* bottom domain must be null terminated */ |
936 | struct sched_group *groups; /* the balancing groups of the domain */ | 936 | struct sched_group *groups; /* the balancing groups of the domain */ |
937 | unsigned long min_interval; /* Minimum balance interval ms */ | 937 | unsigned long min_interval; /* Minimum balance interval ms */ |
938 | unsigned long max_interval; /* Maximum balance interval ms */ | 938 | unsigned long max_interval; /* Maximum balance interval ms */ |
939 | unsigned int busy_factor; /* less balancing by factor if busy */ | 939 | unsigned int busy_factor; /* less balancing by factor if busy */ |
940 | unsigned int imbalance_pct; /* No balance until over watermark */ | 940 | unsigned int imbalance_pct; /* No balance until over watermark */ |
941 | unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ | 941 | unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ |
942 | unsigned int busy_idx; | 942 | unsigned int busy_idx; |
943 | unsigned int idle_idx; | 943 | unsigned int idle_idx; |
944 | unsigned int newidle_idx; | 944 | unsigned int newidle_idx; |
945 | unsigned int wake_idx; | 945 | unsigned int wake_idx; |
946 | unsigned int forkexec_idx; | 946 | unsigned int forkexec_idx; |
947 | unsigned int smt_gain; | 947 | unsigned int smt_gain; |
948 | int flags; /* See SD_* */ | 948 | int flags; /* See SD_* */ |
949 | int level; | 949 | int level; |
950 | 950 | ||
951 | /* Runtime fields. */ | 951 | /* Runtime fields. */ |
952 | unsigned long last_balance; /* init to jiffies. units in jiffies */ | 952 | unsigned long last_balance; /* init to jiffies. units in jiffies */ |
953 | unsigned int balance_interval; /* initialise to 1. units in ms. */ | 953 | unsigned int balance_interval; /* initialise to 1. units in ms. */ |
954 | unsigned int nr_balance_failed; /* initialise to 0 */ | 954 | unsigned int nr_balance_failed; /* initialise to 0 */ |
955 | 955 | ||
956 | u64 last_update; | 956 | u64 last_update; |
957 | 957 | ||
958 | #ifdef CONFIG_SCHEDSTATS | 958 | #ifdef CONFIG_SCHEDSTATS |
959 | /* load_balance() stats */ | 959 | /* load_balance() stats */ |
960 | unsigned int lb_count[CPU_MAX_IDLE_TYPES]; | 960 | unsigned int lb_count[CPU_MAX_IDLE_TYPES]; |
961 | unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; | 961 | unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; |
962 | unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; | 962 | unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; |
963 | unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; | 963 | unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; |
964 | unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; | 964 | unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; |
965 | unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; | 965 | unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; |
966 | unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; | 966 | unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; |
967 | unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES]; | 967 | unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES]; |
968 | 968 | ||
969 | /* Active load balancing */ | 969 | /* Active load balancing */ |
970 | unsigned int alb_count; | 970 | unsigned int alb_count; |
971 | unsigned int alb_failed; | 971 | unsigned int alb_failed; |
972 | unsigned int alb_pushed; | 972 | unsigned int alb_pushed; |
973 | 973 | ||
974 | /* SD_BALANCE_EXEC stats */ | 974 | /* SD_BALANCE_EXEC stats */ |
975 | unsigned int sbe_count; | 975 | unsigned int sbe_count; |
976 | unsigned int sbe_balanced; | 976 | unsigned int sbe_balanced; |
977 | unsigned int sbe_pushed; | 977 | unsigned int sbe_pushed; |
978 | 978 | ||
979 | /* SD_BALANCE_FORK stats */ | 979 | /* SD_BALANCE_FORK stats */ |
980 | unsigned int sbf_count; | 980 | unsigned int sbf_count; |
981 | unsigned int sbf_balanced; | 981 | unsigned int sbf_balanced; |
982 | unsigned int sbf_pushed; | 982 | unsigned int sbf_pushed; |
983 | 983 | ||
984 | /* try_to_wake_up() stats */ | 984 | /* try_to_wake_up() stats */ |
985 | unsigned int ttwu_wake_remote; | 985 | unsigned int ttwu_wake_remote; |
986 | unsigned int ttwu_move_affine; | 986 | unsigned int ttwu_move_affine; |
987 | unsigned int ttwu_move_balance; | 987 | unsigned int ttwu_move_balance; |
988 | #endif | 988 | #endif |
989 | #ifdef CONFIG_SCHED_DEBUG | 989 | #ifdef CONFIG_SCHED_DEBUG |
990 | char *name; | 990 | char *name; |
991 | #endif | 991 | #endif |
992 | union { | 992 | union { |
993 | void *private; /* used during construction */ | 993 | void *private; /* used during construction */ |
994 | struct rcu_head rcu; /* used during destruction */ | 994 | struct rcu_head rcu; /* used during destruction */ |
995 | }; | 995 | }; |
996 | 996 | ||
997 | unsigned int span_weight; | 997 | unsigned int span_weight; |
998 | /* | 998 | /* |
999 | * Span of all CPUs in this domain. | 999 | * Span of all CPUs in this domain. |
1000 | * | 1000 | * |
1001 | * NOTE: this field is variable length. (Allocated dynamically | 1001 | * NOTE: this field is variable length. (Allocated dynamically |
1002 | * by attaching extra space to the end of the structure, | 1002 | * by attaching extra space to the end of the structure, |
1003 | * depending on how many CPUs the kernel has booted up with) | 1003 | * depending on how many CPUs the kernel has booted up with) |
1004 | */ | 1004 | */ |
1005 | unsigned long span[0]; | 1005 | unsigned long span[0]; |
1006 | }; | 1006 | }; |
1007 | 1007 | ||
1008 | static inline struct cpumask *sched_domain_span(struct sched_domain *sd) | 1008 | static inline struct cpumask *sched_domain_span(struct sched_domain *sd) |
1009 | { | 1009 | { |
1010 | return to_cpumask(sd->span); | 1010 | return to_cpumask(sd->span); |
1011 | } | 1011 | } |
1012 | 1012 | ||
1013 | extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], | 1013 | extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], |
1014 | struct sched_domain_attr *dattr_new); | 1014 | struct sched_domain_attr *dattr_new); |
1015 | 1015 | ||
1016 | /* Allocate an array of sched domains, for partition_sched_domains(). */ | 1016 | /* Allocate an array of sched domains, for partition_sched_domains(). */ |
1017 | cpumask_var_t *alloc_sched_domains(unsigned int ndoms); | 1017 | cpumask_var_t *alloc_sched_domains(unsigned int ndoms); |
1018 | void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); | 1018 | void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); |
1019 | 1019 | ||
1020 | /* Test a flag in parent sched domain */ | 1020 | /* Test a flag in parent sched domain */ |
1021 | static inline int test_sd_parent(struct sched_domain *sd, int flag) | 1021 | static inline int test_sd_parent(struct sched_domain *sd, int flag) |
1022 | { | 1022 | { |
1023 | if (sd->parent && (sd->parent->flags & flag)) | 1023 | if (sd->parent && (sd->parent->flags & flag)) |
1024 | return 1; | 1024 | return 1; |
1025 | 1025 | ||
1026 | return 0; | 1026 | return 0; |
1027 | } | 1027 | } |
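
A typical use is asking whether any enclosing level still wants a particular behavior, for example (the branch body is hypothetical):

	/* only consolidate if the parent domain balances for power savings */
	if (test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
		prefer_semi_idle_package = 1;	/* hypothetical policy knob */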
1028 | 1028 | ||
1029 | unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu); | 1029 | unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu); |
1030 | unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu); | 1030 | unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu); |
1031 | 1031 | ||
1032 | #else /* CONFIG_SMP */ | 1032 | #else /* CONFIG_SMP */ |
1033 | 1033 | ||
1034 | struct sched_domain_attr; | 1034 | struct sched_domain_attr; |
1035 | 1035 | ||
1036 | static inline void | 1036 | static inline void |
1037 | partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], | 1037 | partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], |
1038 | struct sched_domain_attr *dattr_new) | 1038 | struct sched_domain_attr *dattr_new) |
1039 | { | 1039 | { |
1040 | } | 1040 | } |
1041 | #endif /* !CONFIG_SMP */ | 1041 | #endif /* !CONFIG_SMP */ |
1042 | 1042 | ||
1043 | 1043 | ||
1044 | struct io_context; /* See blkdev.h */ | 1044 | struct io_context; /* See blkdev.h */ |
1045 | 1045 | ||
1046 | 1046 | ||
1047 | #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK | 1047 | #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK |
1048 | extern void prefetch_stack(struct task_struct *t); | 1048 | extern void prefetch_stack(struct task_struct *t); |
1049 | #else | 1049 | #else |
1050 | static inline void prefetch_stack(struct task_struct *t) { } | 1050 | static inline void prefetch_stack(struct task_struct *t) { } |
1051 | #endif | 1051 | #endif |
1052 | 1052 | ||
1053 | struct audit_context; /* See audit.c */ | 1053 | struct audit_context; /* See audit.c */ |
1054 | struct mempolicy; | 1054 | struct mempolicy; |
1055 | struct pipe_inode_info; | 1055 | struct pipe_inode_info; |
1056 | struct uts_namespace; | 1056 | struct uts_namespace; |
1057 | 1057 | ||
1058 | struct rq; | 1058 | struct rq; |
1059 | struct sched_domain; | 1059 | struct sched_domain; |
1060 | 1060 | ||
1061 | /* | 1061 | /* |
1062 | * wake flags | 1062 | * wake flags |
1063 | */ | 1063 | */ |
1064 | #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ | 1064 | #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ |
1065 | #define WF_FORK 0x02 /* child wakeup after fork */ | 1065 | #define WF_FORK 0x02 /* child wakeup after fork */ |
1066 | 1066 | ||
1067 | #define ENQUEUE_WAKEUP 1 | 1067 | #define ENQUEUE_WAKEUP 1 |
1068 | #define ENQUEUE_HEAD 2 | 1068 | #define ENQUEUE_HEAD 2 |
1069 | #ifdef CONFIG_SMP | 1069 | #ifdef CONFIG_SMP |
1070 | #define ENQUEUE_WAKING 4 /* sched_class::task_waking was called */ | 1070 | #define ENQUEUE_WAKING 4 /* sched_class::task_waking was called */ |
1071 | #else | 1071 | #else |
1072 | #define ENQUEUE_WAKING 0 | 1072 | #define ENQUEUE_WAKING 0 |
1073 | #endif | 1073 | #endif |
1074 | 1074 | ||
1075 | #define DEQUEUE_SLEEP 1 | 1075 | #define DEQUEUE_SLEEP 1 |
1076 | 1076 | ||
1077 | struct sched_class { | 1077 | struct sched_class { |
1078 | const struct sched_class *next; | 1078 | const struct sched_class *next; |
1079 | 1079 | ||
1080 | void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); | 1080 | void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); |
1081 | void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); | 1081 | void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); |
1082 | void (*yield_task) (struct rq *rq); | 1082 | void (*yield_task) (struct rq *rq); |
1083 | bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt); | 1083 | bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt); |
1084 | 1084 | ||
1085 | void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); | 1085 | void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); |
1086 | 1086 | ||
1087 | struct task_struct * (*pick_next_task) (struct rq *rq); | 1087 | struct task_struct * (*pick_next_task) (struct rq *rq); |
1088 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); | 1088 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); |
1089 | 1089 | ||
1090 | #ifdef CONFIG_SMP | 1090 | #ifdef CONFIG_SMP |
1091 | int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags); | 1091 | int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags); |
1092 | 1092 | ||
1093 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); | 1093 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); |
1094 | void (*post_schedule) (struct rq *this_rq); | 1094 | void (*post_schedule) (struct rq *this_rq); |
1095 | void (*task_waking) (struct task_struct *task); | 1095 | void (*task_waking) (struct task_struct *task); |
1096 | void (*task_woken) (struct rq *this_rq, struct task_struct *task); | 1096 | void (*task_woken) (struct rq *this_rq, struct task_struct *task); |
1097 | 1097 | ||
1098 | void (*set_cpus_allowed)(struct task_struct *p, | 1098 | void (*set_cpus_allowed)(struct task_struct *p, |
1099 | const struct cpumask *newmask); | 1099 | const struct cpumask *newmask); |
1100 | 1100 | ||
1101 | void (*rq_online)(struct rq *rq); | 1101 | void (*rq_online)(struct rq *rq); |
1102 | void (*rq_offline)(struct rq *rq); | 1102 | void (*rq_offline)(struct rq *rq); |
1103 | #endif | 1103 | #endif |
1104 | 1104 | ||
1105 | void (*set_curr_task) (struct rq *rq); | 1105 | void (*set_curr_task) (struct rq *rq); |
1106 | void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); | 1106 | void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); |
1107 | void (*task_fork) (struct task_struct *p); | 1107 | void (*task_fork) (struct task_struct *p); |
1108 | 1108 | ||
1109 | void (*switched_from) (struct rq *this_rq, struct task_struct *task); | 1109 | void (*switched_from) (struct rq *this_rq, struct task_struct *task); |
1110 | void (*switched_to) (struct rq *this_rq, struct task_struct *task); | 1110 | void (*switched_to) (struct rq *this_rq, struct task_struct *task); |
1111 | void (*prio_changed) (struct rq *this_rq, struct task_struct *task, | 1111 | void (*prio_changed) (struct rq *this_rq, struct task_struct *task, |
1112 | int oldprio); | 1112 | int oldprio); |
1113 | 1113 | ||
1114 | unsigned int (*get_rr_interval) (struct rq *rq, | 1114 | unsigned int (*get_rr_interval) (struct rq *rq, |
1115 | struct task_struct *task); | 1115 | struct task_struct *task); |
1116 | 1116 | ||
1117 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1117 | #ifdef CONFIG_FAIR_GROUP_SCHED |
1118 | void (*task_move_group) (struct task_struct *p, int on_rq); | 1118 | void (*task_move_group) (struct task_struct *p, int on_rq); |
1119 | #endif | 1119 | #endif |
1120 | }; | 1120 | }; |
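
Scheduling classes are linked through ->next and consulted in strict priority order, so a policy is added by filling in these hooks and splicing the instance into the chain. A heavily abridged, illustrative skeleton only (the class name, the stub body, and the assumption that idle_sched_class is reachable as the lowest-priority neighbour are ours, not the kernel's):

	/* Illustrative skeleton -- not a real scheduling class */
	static struct task_struct *pick_next_task_demo(struct rq *rq)
	{
		return NULL;	/* this class never has runnable tasks */
	}

	static const struct sched_class demo_sched_class = {
		.next		= &idle_sched_class,	/* assumed neighbour */
		.pick_next_task	= pick_next_task_demo,
		/* .enqueue_task, .dequeue_task, ... as the core requires */
	};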
1121 | 1121 | ||
1122 | struct load_weight { | 1122 | struct load_weight { |
1123 | unsigned long weight, inv_weight; | 1123 | unsigned long weight, inv_weight; |
1124 | }; | 1124 | }; |
1125 | 1125 | ||
1126 | #ifdef CONFIG_SCHEDSTATS | 1126 | #ifdef CONFIG_SCHEDSTATS |
1127 | struct sched_statistics { | 1127 | struct sched_statistics { |
1128 | u64 wait_start; | 1128 | u64 wait_start; |
1129 | u64 wait_max; | 1129 | u64 wait_max; |
1130 | u64 wait_count; | 1130 | u64 wait_count; |
1131 | u64 wait_sum; | 1131 | u64 wait_sum; |
1132 | u64 iowait_count; | 1132 | u64 iowait_count; |
1133 | u64 iowait_sum; | 1133 | u64 iowait_sum; |
1134 | 1134 | ||
1135 | u64 sleep_start; | 1135 | u64 sleep_start; |
1136 | u64 sleep_max; | 1136 | u64 sleep_max; |
1137 | s64 sum_sleep_runtime; | 1137 | s64 sum_sleep_runtime; |
1138 | 1138 | ||
1139 | u64 block_start; | 1139 | u64 block_start; |
1140 | u64 block_max; | 1140 | u64 block_max; |
1141 | u64 exec_max; | 1141 | u64 exec_max; |
1142 | u64 slice_max; | 1142 | u64 slice_max; |
1143 | 1143 | ||
1144 | u64 nr_migrations_cold; | 1144 | u64 nr_migrations_cold; |
1145 | u64 nr_failed_migrations_affine; | 1145 | u64 nr_failed_migrations_affine; |
1146 | u64 nr_failed_migrations_running; | 1146 | u64 nr_failed_migrations_running; |
1147 | u64 nr_failed_migrations_hot; | 1147 | u64 nr_failed_migrations_hot; |
1148 | u64 nr_forced_migrations; | 1148 | u64 nr_forced_migrations; |
1149 | 1149 | ||
1150 | u64 nr_wakeups; | 1150 | u64 nr_wakeups; |
1151 | u64 nr_wakeups_sync; | 1151 | u64 nr_wakeups_sync; |
1152 | u64 nr_wakeups_migrate; | 1152 | u64 nr_wakeups_migrate; |
1153 | u64 nr_wakeups_local; | 1153 | u64 nr_wakeups_local; |
1154 | u64 nr_wakeups_remote; | 1154 | u64 nr_wakeups_remote; |
1155 | u64 nr_wakeups_affine; | 1155 | u64 nr_wakeups_affine; |
1156 | u64 nr_wakeups_affine_attempts; | 1156 | u64 nr_wakeups_affine_attempts; |
1157 | u64 nr_wakeups_passive; | 1157 | u64 nr_wakeups_passive; |
1158 | u64 nr_wakeups_idle; | 1158 | u64 nr_wakeups_idle; |
1159 | }; | 1159 | }; |
1160 | #endif | 1160 | #endif |
1161 | 1161 | ||
1162 | struct sched_entity { | 1162 | struct sched_entity { |
1163 | struct load_weight load; /* for load-balancing */ | 1163 | struct load_weight load; /* for load-balancing */ |
1164 | struct rb_node run_node; | 1164 | struct rb_node run_node; |
1165 | struct list_head group_node; | 1165 | struct list_head group_node; |
1166 | unsigned int on_rq; | 1166 | unsigned int on_rq; |
1167 | 1167 | ||
1168 | u64 exec_start; | 1168 | u64 exec_start; |
1169 | u64 sum_exec_runtime; | 1169 | u64 sum_exec_runtime; |
1170 | u64 vruntime; | 1170 | u64 vruntime; |
1171 | u64 prev_sum_exec_runtime; | 1171 | u64 prev_sum_exec_runtime; |
1172 | 1172 | ||
1173 | u64 nr_migrations; | 1173 | u64 nr_migrations; |
1174 | 1174 | ||
1175 | #ifdef CONFIG_SCHEDSTATS | 1175 | #ifdef CONFIG_SCHEDSTATS |
1176 | struct sched_statistics statistics; | 1176 | struct sched_statistics statistics; |
1177 | #endif | 1177 | #endif |
1178 | 1178 | ||
1179 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1179 | #ifdef CONFIG_FAIR_GROUP_SCHED |
1180 | struct sched_entity *parent; | 1180 | struct sched_entity *parent; |
1181 | /* rq on which this entity is (to be) queued: */ | 1181 | /* rq on which this entity is (to be) queued: */ |
1182 | struct cfs_rq *cfs_rq; | 1182 | struct cfs_rq *cfs_rq; |
1183 | /* rq "owned" by this entity/group: */ | 1183 | /* rq "owned" by this entity/group: */ |
1184 | struct cfs_rq *my_q; | 1184 | struct cfs_rq *my_q; |
1185 | #endif | 1185 | #endif |
1186 | }; | 1186 | }; |
1187 | 1187 | ||
1188 | struct sched_rt_entity { | 1188 | struct sched_rt_entity { |
1189 | struct list_head run_list; | 1189 | struct list_head run_list; |
1190 | unsigned long timeout; | 1190 | unsigned long timeout; |
1191 | unsigned int time_slice; | 1191 | unsigned int time_slice; |
1192 | int nr_cpus_allowed; | 1192 | int nr_cpus_allowed; |
1193 | 1193 | ||
1194 | struct sched_rt_entity *back; | 1194 | struct sched_rt_entity *back; |
1195 | #ifdef CONFIG_RT_GROUP_SCHED | 1195 | #ifdef CONFIG_RT_GROUP_SCHED |
1196 | struct sched_rt_entity *parent; | 1196 | struct sched_rt_entity *parent; |
1197 | /* rq on which this entity is (to be) queued: */ | 1197 | /* rq on which this entity is (to be) queued: */ |
1198 | struct rt_rq *rt_rq; | 1198 | struct rt_rq *rt_rq; |
1199 | /* rq "owned" by this entity/group: */ | 1199 | /* rq "owned" by this entity/group: */ |
1200 | struct rt_rq *my_q; | 1200 | struct rt_rq *my_q; |
1201 | #endif | 1201 | #endif |
1202 | }; | 1202 | }; |
1203 | 1203 | ||
1204 | struct rcu_node; | 1204 | struct rcu_node; |
1205 | 1205 | ||
1206 | enum perf_event_task_context { | 1206 | enum perf_event_task_context { |
1207 | perf_invalid_context = -1, | 1207 | perf_invalid_context = -1, |
1208 | perf_hw_context = 0, | 1208 | perf_hw_context = 0, |
1209 | perf_sw_context, | 1209 | perf_sw_context, |
1210 | perf_nr_task_contexts, | 1210 | perf_nr_task_contexts, |
1211 | }; | 1211 | }; |
1212 | 1212 | ||
1213 | struct task_struct { | 1213 | struct task_struct { |
1214 | volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ | 1214 | volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ |
1215 | void *stack; | 1215 | void *stack; |
1216 | atomic_t usage; | 1216 | atomic_t usage; |
1217 | unsigned int flags; /* per process flags, defined below */ | 1217 | unsigned int flags; /* per process flags, defined below */ |
1218 | unsigned int ptrace; | 1218 | unsigned int ptrace; |
1219 | 1219 | ||
1220 | #ifdef CONFIG_SMP | 1220 | #ifdef CONFIG_SMP |
1221 | struct task_struct *wake_entry; | 1221 | struct task_struct *wake_entry; |
1222 | int on_cpu; | 1222 | int on_cpu; |
1223 | #endif | 1223 | #endif |
1224 | int on_rq; | 1224 | int on_rq; |
1225 | 1225 | ||
1226 | int prio, static_prio, normal_prio; | 1226 | int prio, static_prio, normal_prio; |
1227 | unsigned int rt_priority; | 1227 | unsigned int rt_priority; |
1228 | const struct sched_class *sched_class; | 1228 | const struct sched_class *sched_class; |
1229 | struct sched_entity se; | 1229 | struct sched_entity se; |
1230 | struct sched_rt_entity rt; | 1230 | struct sched_rt_entity rt; |
1231 | 1231 | ||
1232 | #ifdef CONFIG_PREEMPT_NOTIFIERS | 1232 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
1233 | /* list of struct preempt_notifier: */ | 1233 | /* list of struct preempt_notifier: */ |
1234 | struct hlist_head preempt_notifiers; | 1234 | struct hlist_head preempt_notifiers; |
1235 | #endif | 1235 | #endif |
1236 | 1236 | ||
1237 | /* | 1237 | /* |
1238 | * fpu_counter contains the number of consecutive context switches | 1238 | * fpu_counter contains the number of consecutive context switches |
1239 | * during which the FPU was used. If this exceeds a threshold, the lazy | 1239 | * during which the FPU was used. If this exceeds a threshold, the lazy |
1240 | * FPU saving becomes eager, avoiding the trap on every use. This is an | 1240 | * FPU saving becomes eager, avoiding the trap on every use. This is an |
1241 | * unsigned char, so after 256 switches the counter wraps and the | 1241 | * unsigned char, so after 256 switches the counter wraps and the |
1242 | * behavior turns lazy again; this deals with bursty apps that only | 1242 | * behavior turns lazy again; this deals with bursty apps that only |
1243 | * use the FPU for a short time. | 1243 | * use the FPU for a short time. |
1244 | */ | 1244 | */ |
1245 | unsigned char fpu_counter; | 1245 | unsigned char fpu_counter; |
1246 | #ifdef CONFIG_BLK_DEV_IO_TRACE | 1246 | #ifdef CONFIG_BLK_DEV_IO_TRACE |
1247 | unsigned int btrace_seq; | 1247 | unsigned int btrace_seq; |
1248 | #endif | 1248 | #endif |
1249 | 1249 | ||
1250 | unsigned int policy; | 1250 | unsigned int policy; |
1251 | cpumask_t cpus_allowed; | 1251 | cpumask_t cpus_allowed; |
1252 | 1252 | ||
1253 | #ifdef CONFIG_PREEMPT_RCU | 1253 | #ifdef CONFIG_PREEMPT_RCU |
1254 | int rcu_read_lock_nesting; | 1254 | int rcu_read_lock_nesting; |
1255 | char rcu_read_unlock_special; | 1255 | char rcu_read_unlock_special; |
1256 | struct list_head rcu_node_entry; | 1256 | struct list_head rcu_node_entry; |
1257 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ | 1257 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ |
1258 | #ifdef CONFIG_TREE_PREEMPT_RCU | 1258 | #ifdef CONFIG_TREE_PREEMPT_RCU |
1259 | struct rcu_node *rcu_blocked_node; | 1259 | struct rcu_node *rcu_blocked_node; |
1260 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | 1260 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
1261 | #ifdef CONFIG_RCU_BOOST | 1261 | #ifdef CONFIG_RCU_BOOST |
1262 | struct rt_mutex *rcu_boost_mutex; | 1262 | struct rt_mutex *rcu_boost_mutex; |
1263 | #endif /* #ifdef CONFIG_RCU_BOOST */ | 1263 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
1264 | 1264 | ||
1265 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) | 1265 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) |
1266 | struct sched_info sched_info; | 1266 | struct sched_info sched_info; |
1267 | #endif | 1267 | #endif |
1268 | 1268 | ||
1269 | struct list_head tasks; | 1269 | struct list_head tasks; |
1270 | #ifdef CONFIG_SMP | 1270 | #ifdef CONFIG_SMP |
1271 | struct plist_node pushable_tasks; | 1271 | struct plist_node pushable_tasks; |
1272 | #endif | 1272 | #endif |
1273 | 1273 | ||
1274 | struct mm_struct *mm, *active_mm; | 1274 | struct mm_struct *mm, *active_mm; |
1275 | #ifdef CONFIG_COMPAT_BRK | 1275 | #ifdef CONFIG_COMPAT_BRK |
1276 | unsigned brk_randomized:1; | 1276 | unsigned brk_randomized:1; |
1277 | #endif | 1277 | #endif |
1278 | #if defined(SPLIT_RSS_COUNTING) | 1278 | #if defined(SPLIT_RSS_COUNTING) |
1279 | struct task_rss_stat rss_stat; | 1279 | struct task_rss_stat rss_stat; |
1280 | #endif | 1280 | #endif |
1281 | /* task state */ | 1281 | /* task state */ |
1282 | int exit_state; | 1282 | int exit_state; |
1283 | int exit_code, exit_signal; | 1283 | int exit_code, exit_signal; |
1284 | int pdeath_signal; /* The signal sent when the parent dies */ | 1284 | int pdeath_signal; /* The signal sent when the parent dies */ |
1285 | unsigned int jobctl; /* JOBCTL_*, siglock protected */ | 1285 | unsigned int jobctl; /* JOBCTL_*, siglock protected */ |
1286 | /* ??? */ | 1286 | /* ??? */ |
1287 | unsigned int personality; | 1287 | unsigned int personality; |
1288 | unsigned did_exec:1; | 1288 | unsigned did_exec:1; |
1289 | unsigned in_execve:1; /* Tell the LSMs that the process is doing an | 1289 | unsigned in_execve:1; /* Tell the LSMs that the process is doing an |
1290 | * execve */ | 1290 | * execve */ |
1291 | unsigned in_iowait:1; | 1291 | unsigned in_iowait:1; |
1292 | 1292 | ||
1293 | 1293 | ||
1294 | /* Revert to default priority/policy when forking */ | 1294 | /* Revert to default priority/policy when forking */ |
1295 | unsigned sched_reset_on_fork:1; | 1295 | unsigned sched_reset_on_fork:1; |
1296 | unsigned sched_contributes_to_load:1; | 1296 | unsigned sched_contributes_to_load:1; |
1297 | 1297 | ||
1298 | pid_t pid; | 1298 | pid_t pid; |
1299 | pid_t tgid; | 1299 | pid_t tgid; |
1300 | 1300 | ||
1301 | #ifdef CONFIG_CC_STACKPROTECTOR | 1301 | #ifdef CONFIG_CC_STACKPROTECTOR |
1302 | /* Canary value for the -fstack-protector gcc feature */ | 1302 | /* Canary value for the -fstack-protector gcc feature */ |
1303 | unsigned long stack_canary; | 1303 | unsigned long stack_canary; |
1304 | #endif | 1304 | #endif |
1305 | 1305 | ||
1306 | /* | 1306 | /* |
1307 | * pointers to (original) parent process, youngest child, younger sibling, | 1307 | * pointers to (original) parent process, youngest child, younger sibling, |
1308 | * older sibling, respectively. (p->father can be replaced with | 1308 | * older sibling, respectively. (p->father can be replaced with |
1309 | * p->real_parent->pid) | 1309 | * p->real_parent->pid) |
1310 | */ | 1310 | */ |
1311 | struct task_struct *real_parent; /* real parent process */ | 1311 | struct task_struct *real_parent; /* real parent process */ |
1312 | struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */ | 1312 | struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */ |
1313 | /* | 1313 | /* |
1314 | * children/sibling forms the list of my natural children | 1314 | * children/sibling forms the list of my natural children |
1315 | */ | 1315 | */ |
1316 | struct list_head children; /* list of my children */ | 1316 | struct list_head children; /* list of my children */ |
1317 | struct list_head sibling; /* linkage in my parent's children list */ | 1317 | struct list_head sibling; /* linkage in my parent's children list */ |
1318 | struct task_struct *group_leader; /* threadgroup leader */ | 1318 | struct task_struct *group_leader; /* threadgroup leader */ |
1319 | 1319 | ||
1320 | /* | 1320 | /* |
1321 | * ptraced is the list of tasks this task is using ptrace on. | 1321 | * ptraced is the list of tasks this task is using ptrace on. |
1322 | * This includes both natural children and PTRACE_ATTACH targets. | 1322 | * This includes both natural children and PTRACE_ATTACH targets. |
1323 | * p->ptrace_entry is p's link on the p->parent->ptraced list. | 1323 | * p->ptrace_entry is p's link on the p->parent->ptraced list. |
1324 | */ | 1324 | */ |
1325 | struct list_head ptraced; | 1325 | struct list_head ptraced; |
1326 | struct list_head ptrace_entry; | 1326 | struct list_head ptrace_entry; |
1327 | 1327 | ||
1328 | /* PID/PID hash table linkage. */ | 1328 | /* PID/PID hash table linkage. */ |
1329 | struct pid_link pids[PIDTYPE_MAX]; | 1329 | struct pid_link pids[PIDTYPE_MAX]; |
1330 | struct list_head thread_group; | 1330 | struct list_head thread_group; |
1331 | 1331 | ||
1332 | struct completion *vfork_done; /* for vfork() */ | 1332 | struct completion *vfork_done; /* for vfork() */ |
1333 | int __user *set_child_tid; /* CLONE_CHILD_SETTID */ | 1333 | int __user *set_child_tid; /* CLONE_CHILD_SETTID */ |
1334 | int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ | 1334 | int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ |
1335 | 1335 | ||
1336 | cputime_t utime, stime, utimescaled, stimescaled; | 1336 | cputime_t utime, stime, utimescaled, stimescaled; |
1337 | cputime_t gtime; | 1337 | cputime_t gtime; |
1338 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | 1338 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING |
1339 | cputime_t prev_utime, prev_stime; | 1339 | cputime_t prev_utime, prev_stime; |
1340 | #endif | 1340 | #endif |
1341 | unsigned long nvcsw, nivcsw; /* context switch counts */ | 1341 | unsigned long nvcsw, nivcsw; /* context switch counts */ |
1342 | struct timespec start_time; /* monotonic time */ | 1342 | struct timespec start_time; /* monotonic time */ |
1343 | struct timespec real_start_time; /* boot based time */ | 1343 | struct timespec real_start_time; /* boot based time */ |
1344 | /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ | 1344 | /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ |
1345 | unsigned long min_flt, maj_flt; | 1345 | unsigned long min_flt, maj_flt; |
1346 | 1346 | ||
1347 | struct task_cputime cputime_expires; | 1347 | struct task_cputime cputime_expires; |
1348 | struct list_head cpu_timers[3]; | 1348 | struct list_head cpu_timers[3]; |
1349 | 1349 | ||
1350 | /* process credentials */ | 1350 | /* process credentials */ |
1351 | const struct cred __rcu *real_cred; /* objective and real subjective task | 1351 | const struct cred __rcu *real_cred; /* objective and real subjective task |
1352 | * credentials (COW) */ | 1352 | * credentials (COW) */ |
1353 | const struct cred __rcu *cred; /* effective (overridable) subjective task | 1353 | const struct cred __rcu *cred; /* effective (overridable) subjective task |
1354 | * credentials (COW) */ | 1354 | * credentials (COW) */ |
1355 | struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */ | 1355 | struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */ |
1356 | 1356 | ||
1357 | char comm[TASK_COMM_LEN]; /* executable name excluding path | 1357 | char comm[TASK_COMM_LEN]; /* executable name excluding path |
1358 | - access with [gs]et_task_comm (which lock | 1358 | - access with [gs]et_task_comm (which lock |
1359 | it with task_lock()) | 1359 | it with task_lock()) |
1360 | - initialized normally by setup_new_exec */ | 1360 | - initialized normally by setup_new_exec */ |
1361 | /* file system info */ | 1361 | /* file system info */ |
1362 | int link_count, total_link_count; | 1362 | int link_count, total_link_count; |
1363 | #ifdef CONFIG_SYSVIPC | 1363 | #ifdef CONFIG_SYSVIPC |
1364 | /* ipc stuff */ | 1364 | /* ipc stuff */ |
1365 | struct sysv_sem sysvsem; | 1365 | struct sysv_sem sysvsem; |
1366 | #endif | 1366 | #endif |
1367 | #ifdef CONFIG_DETECT_HUNG_TASK | 1367 | #ifdef CONFIG_DETECT_HUNG_TASK |
1368 | /* hung task detection */ | 1368 | /* hung task detection */ |
1369 | unsigned long last_switch_count; | 1369 | unsigned long last_switch_count; |
1370 | #endif | 1370 | #endif |
1371 | /* CPU-specific state of this task */ | 1371 | /* CPU-specific state of this task */ |
1372 | struct thread_struct thread; | 1372 | struct thread_struct thread; |
1373 | /* filesystem information */ | 1373 | /* filesystem information */ |
1374 | struct fs_struct *fs; | 1374 | struct fs_struct *fs; |
1375 | /* open file information */ | 1375 | /* open file information */ |
1376 | struct files_struct *files; | 1376 | struct files_struct *files; |
1377 | /* namespaces */ | 1377 | /* namespaces */ |
1378 | struct nsproxy *nsproxy; | 1378 | struct nsproxy *nsproxy; |
1379 | /* signal handlers */ | 1379 | /* signal handlers */ |
1380 | struct signal_struct *signal; | 1380 | struct signal_struct *signal; |
1381 | struct sighand_struct *sighand; | 1381 | struct sighand_struct *sighand; |
1382 | 1382 | ||
1383 | sigset_t blocked, real_blocked; | 1383 | sigset_t blocked, real_blocked; |
1384 | sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ | 1384 | sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ |
1385 | struct sigpending pending; | 1385 | struct sigpending pending; |
1386 | 1386 | ||
1387 | unsigned long sas_ss_sp; | 1387 | unsigned long sas_ss_sp; |
1388 | size_t sas_ss_size; | 1388 | size_t sas_ss_size; |
1389 | int (*notifier)(void *priv); | 1389 | int (*notifier)(void *priv); |
1390 | void *notifier_data; | 1390 | void *notifier_data; |
1391 | sigset_t *notifier_mask; | 1391 | sigset_t *notifier_mask; |
1392 | struct audit_context *audit_context; | 1392 | struct audit_context *audit_context; |
1393 | #ifdef CONFIG_AUDITSYSCALL | 1393 | #ifdef CONFIG_AUDITSYSCALL |
1394 | uid_t loginuid; | 1394 | uid_t loginuid; |
1395 | unsigned int sessionid; | 1395 | unsigned int sessionid; |
1396 | #endif | 1396 | #endif |
1397 | seccomp_t seccomp; | 1397 | seccomp_t seccomp; |
1398 | 1398 | ||
1399 | /* Thread group tracking */ | 1399 | /* Thread group tracking */ |
1400 | u32 parent_exec_id; | 1400 | u32 parent_exec_id; |
1401 | u32 self_exec_id; | 1401 | u32 self_exec_id; |
1402 | /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, | 1402 | /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, |
1403 | * mempolicy */ | 1403 | * mempolicy */ |
1404 | spinlock_t alloc_lock; | 1404 | spinlock_t alloc_lock; |
1405 | 1405 | ||
1406 | #ifdef CONFIG_GENERIC_HARDIRQS | 1406 | #ifdef CONFIG_GENERIC_HARDIRQS |
1407 | /* IRQ handler threads */ | 1407 | /* IRQ handler threads */ |
1408 | struct irqaction *irqaction; | 1408 | struct irqaction *irqaction; |
1409 | #endif | 1409 | #endif |
1410 | 1410 | ||
1411 | /* Protection of the PI data structures: */ | 1411 | /* Protection of the PI data structures: */ |
1412 | raw_spinlock_t pi_lock; | 1412 | raw_spinlock_t pi_lock; |
1413 | 1413 | ||
1414 | #ifdef CONFIG_RT_MUTEXES | 1414 | #ifdef CONFIG_RT_MUTEXES |
1415 | /* PI waiters blocked on a rt_mutex held by this task */ | 1415 | /* PI waiters blocked on a rt_mutex held by this task */ |
1416 | struct plist_head pi_waiters; | 1416 | struct plist_head pi_waiters; |
1417 | /* Deadlock detection and priority inheritance handling */ | 1417 | /* Deadlock detection and priority inheritance handling */ |
1418 | struct rt_mutex_waiter *pi_blocked_on; | 1418 | struct rt_mutex_waiter *pi_blocked_on; |
1419 | #endif | 1419 | #endif |
1420 | 1420 | ||
1421 | #ifdef CONFIG_DEBUG_MUTEXES | 1421 | #ifdef CONFIG_DEBUG_MUTEXES |
1422 | /* mutex deadlock detection */ | 1422 | /* mutex deadlock detection */ |
1423 | struct mutex_waiter *blocked_on; | 1423 | struct mutex_waiter *blocked_on; |
1424 | #endif | 1424 | #endif |
1425 | #ifdef CONFIG_TRACE_IRQFLAGS | 1425 | #ifdef CONFIG_TRACE_IRQFLAGS |
1426 | unsigned int irq_events; | 1426 | unsigned int irq_events; |
1427 | unsigned long hardirq_enable_ip; | 1427 | unsigned long hardirq_enable_ip; |
1428 | unsigned long hardirq_disable_ip; | 1428 | unsigned long hardirq_disable_ip; |
1429 | unsigned int hardirq_enable_event; | 1429 | unsigned int hardirq_enable_event; |
1430 | unsigned int hardirq_disable_event; | 1430 | unsigned int hardirq_disable_event; |
1431 | int hardirqs_enabled; | 1431 | int hardirqs_enabled; |
1432 | int hardirq_context; | 1432 | int hardirq_context; |
1433 | unsigned long softirq_disable_ip; | 1433 | unsigned long softirq_disable_ip; |
1434 | unsigned long softirq_enable_ip; | 1434 | unsigned long softirq_enable_ip; |
1435 | unsigned int softirq_disable_event; | 1435 | unsigned int softirq_disable_event; |
1436 | unsigned int softirq_enable_event; | 1436 | unsigned int softirq_enable_event; |
1437 | int softirqs_enabled; | 1437 | int softirqs_enabled; |
1438 | int softirq_context; | 1438 | int softirq_context; |
1439 | #endif | 1439 | #endif |
1440 | #ifdef CONFIG_LOCKDEP | 1440 | #ifdef CONFIG_LOCKDEP |
1441 | # define MAX_LOCK_DEPTH 48UL | 1441 | # define MAX_LOCK_DEPTH 48UL |
1442 | u64 curr_chain_key; | 1442 | u64 curr_chain_key; |
1443 | int lockdep_depth; | 1443 | int lockdep_depth; |
1444 | unsigned int lockdep_recursion; | 1444 | unsigned int lockdep_recursion; |
1445 | struct held_lock held_locks[MAX_LOCK_DEPTH]; | 1445 | struct held_lock held_locks[MAX_LOCK_DEPTH]; |
1446 | gfp_t lockdep_reclaim_gfp; | 1446 | gfp_t lockdep_reclaim_gfp; |
1447 | #endif | 1447 | #endif |
1448 | 1448 | ||
1449 | /* journalling filesystem info */ | 1449 | /* journalling filesystem info */ |
1450 | void *journal_info; | 1450 | void *journal_info; |
1451 | 1451 | ||
1452 | /* stacked block device info */ | 1452 | /* stacked block device info */ |
1453 | struct bio_list *bio_list; | 1453 | struct bio_list *bio_list; |
1454 | 1454 | ||
1455 | #ifdef CONFIG_BLOCK | 1455 | #ifdef CONFIG_BLOCK |
1456 | /* stack plugging */ | 1456 | /* stack plugging */ |
1457 | struct blk_plug *plug; | 1457 | struct blk_plug *plug; |
1458 | #endif | 1458 | #endif |
1459 | 1459 | ||
1460 | /* VM state */ | 1460 | /* VM state */ |
1461 | struct reclaim_state *reclaim_state; | 1461 | struct reclaim_state *reclaim_state; |
1462 | 1462 | ||
1463 | struct backing_dev_info *backing_dev_info; | 1463 | struct backing_dev_info *backing_dev_info; |
1464 | 1464 | ||
1465 | struct io_context *io_context; | 1465 | struct io_context *io_context; |
1466 | 1466 | ||
1467 | unsigned long ptrace_message; | 1467 | unsigned long ptrace_message; |
1468 | siginfo_t *last_siginfo; /* For ptrace use. */ | 1468 | siginfo_t *last_siginfo; /* For ptrace use. */ |
1469 | struct task_io_accounting ioac; | 1469 | struct task_io_accounting ioac; |
1470 | #if defined(CONFIG_TASK_XACCT) | 1470 | #if defined(CONFIG_TASK_XACCT) |
1471 | u64 acct_rss_mem1; /* accumulated rss usage */ | 1471 | u64 acct_rss_mem1; /* accumulated rss usage */ |
1472 | u64 acct_vm_mem1; /* accumulated virtual memory usage */ | 1472 | u64 acct_vm_mem1; /* accumulated virtual memory usage */ |
1473 | cputime_t acct_timexpd; /* stime + utime since last update */ | 1473 | cputime_t acct_timexpd; /* stime + utime since last update */ |
1474 | #endif | 1474 | #endif |
1475 | #ifdef CONFIG_CPUSETS | 1475 | #ifdef CONFIG_CPUSETS |
1476 | nodemask_t mems_allowed; /* Protected by alloc_lock */ | 1476 | nodemask_t mems_allowed; /* Protected by alloc_lock */ |
1477 | int mems_allowed_change_disable; | 1477 | int mems_allowed_change_disable; |
1478 | int cpuset_mem_spread_rotor; | 1478 | int cpuset_mem_spread_rotor; |
1479 | int cpuset_slab_spread_rotor; | 1479 | int cpuset_slab_spread_rotor; |
1480 | #endif | 1480 | #endif |
1481 | #ifdef CONFIG_CGROUPS | 1481 | #ifdef CONFIG_CGROUPS |
1482 | /* Control Group info protected by css_set_lock */ | 1482 | /* Control Group info protected by css_set_lock */ |
1483 | struct css_set __rcu *cgroups; | 1483 | struct css_set __rcu *cgroups; |
1484 | /* cg_list protected by css_set_lock and tsk->alloc_lock */ | 1484 | /* cg_list protected by css_set_lock and tsk->alloc_lock */ |
1485 | struct list_head cg_list; | 1485 | struct list_head cg_list; |
1486 | #endif | 1486 | #endif |
1487 | #ifdef CONFIG_FUTEX | 1487 | #ifdef CONFIG_FUTEX |
1488 | struct robust_list_head __user *robust_list; | 1488 | struct robust_list_head __user *robust_list; |
1489 | #ifdef CONFIG_COMPAT | 1489 | #ifdef CONFIG_COMPAT |
1490 | struct compat_robust_list_head __user *compat_robust_list; | 1490 | struct compat_robust_list_head __user *compat_robust_list; |
1491 | #endif | 1491 | #endif |
1492 | struct list_head pi_state_list; | 1492 | struct list_head pi_state_list; |
1493 | struct futex_pi_state *pi_state_cache; | 1493 | struct futex_pi_state *pi_state_cache; |
1494 | #endif | 1494 | #endif |
1495 | #ifdef CONFIG_PERF_EVENTS | 1495 | #ifdef CONFIG_PERF_EVENTS |
1496 | struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; | 1496 | struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; |
1497 | struct mutex perf_event_mutex; | 1497 | struct mutex perf_event_mutex; |
1498 | struct list_head perf_event_list; | 1498 | struct list_head perf_event_list; |
1499 | #endif | 1499 | #endif |
1500 | #ifdef CONFIG_NUMA | 1500 | #ifdef CONFIG_NUMA |
1501 | struct mempolicy *mempolicy; /* Protected by alloc_lock */ | 1501 | struct mempolicy *mempolicy; /* Protected by alloc_lock */ |
1502 | short il_next; | 1502 | short il_next; |
1503 | short pref_node_fork; | 1503 | short pref_node_fork; |
1504 | #endif | 1504 | #endif |
1505 | atomic_t fs_excl; /* holding fs exclusive resources */ | 1505 | atomic_t fs_excl; /* holding fs exclusive resources */ |
1506 | struct rcu_head rcu; | 1506 | struct rcu_head rcu; |
1507 | 1507 | ||
1508 | /* | 1508 | /* |
1509 | * cache last used pipe for splice | 1509 | * cache last used pipe for splice |
1510 | */ | 1510 | */ |
1511 | struct pipe_inode_info *splice_pipe; | 1511 | struct pipe_inode_info *splice_pipe; |
1512 | #ifdef CONFIG_TASK_DELAY_ACCT | 1512 | #ifdef CONFIG_TASK_DELAY_ACCT |
1513 | struct task_delay_info *delays; | 1513 | struct task_delay_info *delays; |
1514 | #endif | 1514 | #endif |
1515 | #ifdef CONFIG_FAULT_INJECTION | 1515 | #ifdef CONFIG_FAULT_INJECTION |
1516 | int make_it_fail; | 1516 | int make_it_fail; |
1517 | #endif | 1517 | #endif |
1518 | struct prop_local_single dirties; | 1518 | struct prop_local_single dirties; |
1519 | #ifdef CONFIG_LATENCYTOP | 1519 | #ifdef CONFIG_LATENCYTOP |
1520 | int latency_record_count; | 1520 | int latency_record_count; |
1521 | struct latency_record latency_record[LT_SAVECOUNT]; | 1521 | struct latency_record latency_record[LT_SAVECOUNT]; |
1522 | #endif | 1522 | #endif |
1523 | /* | 1523 | /* |
1524 | * time slack values; these are used to round up poll() and | 1524 | * time slack values; these are used to round up poll() and |
1525 | * select(), etc., timeout values. These are in nanoseconds. | 1525 | * select(), etc., timeout values. These are in nanoseconds. |
1526 | */ | 1526 | */ |
1527 | unsigned long timer_slack_ns; | 1527 | unsigned long timer_slack_ns; |
1528 | unsigned long default_timer_slack_ns; | 1528 | unsigned long default_timer_slack_ns; |
1529 | 1529 | ||
1530 | struct list_head *scm_work_list; | 1530 | struct list_head *scm_work_list; |
1531 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 1531 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1532 | /* Index of current stored address in ret_stack */ | 1532 | /* Index of current stored address in ret_stack */ |
1533 | int curr_ret_stack; | 1533 | int curr_ret_stack; |
1534 | /* Stack of return addresses for return function tracing */ | 1534 | /* Stack of return addresses for return function tracing */ |
1535 | struct ftrace_ret_stack *ret_stack; | 1535 | struct ftrace_ret_stack *ret_stack; |
1536 | /* time stamp for last schedule */ | 1536 | /* time stamp for last schedule */ |
1537 | unsigned long long ftrace_timestamp; | 1537 | unsigned long long ftrace_timestamp; |
1538 | /* | 1538 | /* |
1539 | * Number of functions that haven't been traced | 1539 | * Number of functions that haven't been traced |
1540 | * because of depth overrun. | 1540 | * because of depth overrun. |
1541 | */ | 1541 | */ |
1542 | atomic_t trace_overrun; | 1542 | atomic_t trace_overrun; |
1543 | /* Pause for the tracing */ | 1543 | /* Pause for the tracing */ |
1544 | atomic_t tracing_graph_pause; | 1544 | atomic_t tracing_graph_pause; |
1545 | #endif | 1545 | #endif |
1546 | #ifdef CONFIG_TRACING | 1546 | #ifdef CONFIG_TRACING |
1547 | /* state flags for use by tracers */ | 1547 | /* state flags for use by tracers */ |
1548 | unsigned long trace; | 1548 | unsigned long trace; |
1549 | /* bitmask and counter of trace recursion */ | 1549 | /* bitmask and counter of trace recursion */ |
1550 | unsigned long trace_recursion; | 1550 | unsigned long trace_recursion; |
1551 | #endif /* CONFIG_TRACING */ | 1551 | #endif /* CONFIG_TRACING */ |
1552 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */ | 1552 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */ |
1553 | struct memcg_batch_info { | 1553 | struct memcg_batch_info { |
1554 | int do_batch; /* incremented when batch uncharge started */ | 1554 | int do_batch; /* incremented when batch uncharge started */ |
1555 | struct mem_cgroup *memcg; /* target memcg of uncharge */ | 1555 | struct mem_cgroup *memcg; /* target memcg of uncharge */ |
1556 | unsigned long nr_pages; /* uncharged usage */ | 1556 | unsigned long nr_pages; /* uncharged usage */ |
1557 | unsigned long memsw_nr_pages; /* uncharged mem+swap usage */ | 1557 | unsigned long memsw_nr_pages; /* uncharged mem+swap usage */ |
1558 | } memcg_batch; | 1558 | } memcg_batch; |
1559 | #endif | 1559 | #endif |
1560 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 1560 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
1561 | atomic_t ptrace_bp_refcnt; | 1561 | atomic_t ptrace_bp_refcnt; |
1562 | #endif | 1562 | #endif |
1563 | }; | 1563 | }; |
1564 | 1564 | ||
1565 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ | 1565 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ |
1566 | #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) | 1566 | #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) |
1567 | 1567 | ||
1568 | /* | 1568 | /* |
1569 | * Priority of a process goes from 0..MAX_PRIO-1, valid RT | 1569 | * Priority of a process goes from 0..MAX_PRIO-1, valid RT |
1570 | * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH | 1570 | * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH |
1571 | * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority | 1571 | * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority |
1572 | * values are inverted: lower p->prio value means higher priority. | 1572 | * values are inverted: lower p->prio value means higher priority. |
1573 | * | 1573 | * |
1574 | * The MAX_USER_RT_PRIO value allows the actual maximum | 1574 | * The MAX_USER_RT_PRIO value allows the actual maximum |
1575 | * RT priority to be separate from the value exported to | 1575 | * RT priority to be separate from the value exported to |
1576 | * user-space. This allows kernel threads to set their | 1576 | * user-space. This allows kernel threads to set their |
1577 | * priority to a value higher than any user task. Note: | 1577 | * priority to a value higher than any user task. Note: |
1578 | * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO. | 1578 | * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO. |
1579 | */ | 1579 | */ |
1580 | 1580 | ||
1581 | #define MAX_USER_RT_PRIO 100 | 1581 | #define MAX_USER_RT_PRIO 100 |
1582 | #define MAX_RT_PRIO MAX_USER_RT_PRIO | 1582 | #define MAX_RT_PRIO MAX_USER_RT_PRIO |
1583 | 1583 | ||
1584 | #define MAX_PRIO (MAX_RT_PRIO + 40) | 1584 | #define MAX_PRIO (MAX_RT_PRIO + 40) |
1585 | #define DEFAULT_PRIO (MAX_RT_PRIO + 20) | 1585 | #define DEFAULT_PRIO (MAX_RT_PRIO + 20) |
1586 | 1586 | ||
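
Because the 40 nice levels sit directly above the RT range, mapping between nice values and the non-RT prio range is plain arithmetic. A minimal sketch of that mapping (kernel/sched.c defines helpers along these lines; treat the exact names here as illustrative):

	/* Map nice levels [-20, 19] onto prio [MAX_RT_PRIO, MAX_PRIO-1];
	 * nice 0 lands exactly on DEFAULT_PRIO. */
	#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
	#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)

	/* NICE_TO_PRIO(-20) == 100, NICE_TO_PRIO(0) == 120 == DEFAULT_PRIO,
	 * NICE_TO_PRIO(19) == 139 == MAX_PRIO - 1. */
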
1587 | static inline int rt_prio(int prio) | 1587 | static inline int rt_prio(int prio) |
1588 | { | 1588 | { |
1589 | if (unlikely(prio < MAX_RT_PRIO)) | 1589 | if (unlikely(prio < MAX_RT_PRIO)) |
1590 | return 1; | 1590 | return 1; |
1591 | return 0; | 1591 | return 0; |
1592 | } | 1592 | } |
1593 | 1593 | ||
1594 | static inline int rt_task(struct task_struct *p) | 1594 | static inline int rt_task(struct task_struct *p) |
1595 | { | 1595 | { |
1596 | return rt_prio(p->prio); | 1596 | return rt_prio(p->prio); |
1597 | } | 1597 | } |
1598 | 1598 | ||
1599 | static inline struct pid *task_pid(struct task_struct *task) | 1599 | static inline struct pid *task_pid(struct task_struct *task) |
1600 | { | 1600 | { |
1601 | return task->pids[PIDTYPE_PID].pid; | 1601 | return task->pids[PIDTYPE_PID].pid; |
1602 | } | 1602 | } |
1603 | 1603 | ||
1604 | static inline struct pid *task_tgid(struct task_struct *task) | 1604 | static inline struct pid *task_tgid(struct task_struct *task) |
1605 | { | 1605 | { |
1606 | return task->group_leader->pids[PIDTYPE_PID].pid; | 1606 | return task->group_leader->pids[PIDTYPE_PID].pid; |
1607 | } | 1607 | } |
1608 | 1608 | ||
1609 | /* | 1609 | /* |
1610 | * Without tasklist or rcu lock it is not safe to dereference | 1610 | * Without tasklist or rcu lock it is not safe to dereference |
1611 | * the result of task_pgrp/task_session even if task == current, | 1611 | * the result of task_pgrp/task_session even if task == current, |
1612 | * we can race with another thread doing sys_setsid/sys_setpgid. | 1612 | * we can race with another thread doing sys_setsid/sys_setpgid. |
1613 | */ | 1613 | */ |
1614 | static inline struct pid *task_pgrp(struct task_struct *task) | 1614 | static inline struct pid *task_pgrp(struct task_struct *task) |
1615 | { | 1615 | { |
1616 | return task->group_leader->pids[PIDTYPE_PGID].pid; | 1616 | return task->group_leader->pids[PIDTYPE_PGID].pid; |
1617 | } | 1617 | } |
1618 | 1618 | ||
1619 | static inline struct pid *task_session(struct task_struct *task) | 1619 | static inline struct pid *task_session(struct task_struct *task) |
1620 | { | 1620 | { |
1621 | return task->group_leader->pids[PIDTYPE_SID].pid; | 1621 | return task->group_leader->pids[PIDTYPE_SID].pid; |
1622 | } | 1622 | } |
1623 | 1623 | ||
1624 | struct pid_namespace; | 1624 | struct pid_namespace; |
1625 | 1625 | ||
1626 | /* | 1626 | /* |
1627 | * the helpers to get the task's different pids as they are seen | 1627 | * the helpers to get the task's different pids as they are seen |
1628 | * from various namespaces | 1628 | * from various namespaces |
1629 | * | 1629 | * |
1630 | * task_xid_nr() : global id, i.e. the id seen from the init namespace; | 1630 | * task_xid_nr() : global id, i.e. the id seen from the init namespace; |
1631 | * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of | 1631 | * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of |
1632 | * current. | 1632 | * current. |
1633 | * task_xid_nr_ns() : id seen from the ns specified; | 1633 | * task_xid_nr_ns() : id seen from the ns specified; |
1634 | * | 1634 | * |
1635 | * set_task_vxid() : assigns a virtual id to a task; | 1635 | * set_task_vxid() : assigns a virtual id to a task; |
1636 | * | 1636 | * |
1637 | * see also pid_nr() etc in include/linux/pid.h | 1637 | * see also pid_nr() etc in include/linux/pid.h |
1638 | */ | 1638 | */ |
1639 | pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, | 1639 | pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, |
1640 | struct pid_namespace *ns); | 1640 | struct pid_namespace *ns); |
1641 | 1641 | ||
1642 | static inline pid_t task_pid_nr(struct task_struct *tsk) | 1642 | static inline pid_t task_pid_nr(struct task_struct *tsk) |
1643 | { | 1643 | { |
1644 | return tsk->pid; | 1644 | return tsk->pid; |
1645 | } | 1645 | } |
1646 | 1646 | ||
1647 | static inline pid_t task_pid_nr_ns(struct task_struct *tsk, | 1647 | static inline pid_t task_pid_nr_ns(struct task_struct *tsk, |
1648 | struct pid_namespace *ns) | 1648 | struct pid_namespace *ns) |
1649 | { | 1649 | { |
1650 | return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); | 1650 | return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); |
1651 | } | 1651 | } |
1652 | 1652 | ||
1653 | static inline pid_t task_pid_vnr(struct task_struct *tsk) | 1653 | static inline pid_t task_pid_vnr(struct task_struct *tsk) |
1654 | { | 1654 | { |
1655 | return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); | 1655 | return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); |
1656 | } | 1656 | } |
1657 | 1657 | ||
1658 | 1658 | ||
1659 | static inline pid_t task_tgid_nr(struct task_struct *tsk) | 1659 | static inline pid_t task_tgid_nr(struct task_struct *tsk) |
1660 | { | 1660 | { |
1661 | return tsk->tgid; | 1661 | return tsk->tgid; |
1662 | } | 1662 | } |
1663 | 1663 | ||
1664 | pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); | 1664 | pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); |
1665 | 1665 | ||
1666 | static inline pid_t task_tgid_vnr(struct task_struct *tsk) | 1666 | static inline pid_t task_tgid_vnr(struct task_struct *tsk) |
1667 | { | 1667 | { |
1668 | return pid_vnr(task_tgid(tsk)); | 1668 | return pid_vnr(task_tgid(tsk)); |
1669 | } | 1669 | } |
1670 | 1670 | ||
1671 | 1671 | ||
1672 | static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, | 1672 | static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, |
1673 | struct pid_namespace *ns) | 1673 | struct pid_namespace *ns) |
1674 | { | 1674 | { |
1675 | return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); | 1675 | return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); |
1676 | } | 1676 | } |
1677 | 1677 | ||
1678 | static inline pid_t task_pgrp_vnr(struct task_struct *tsk) | 1678 | static inline pid_t task_pgrp_vnr(struct task_struct *tsk) |
1679 | { | 1679 | { |
1680 | return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); | 1680 | return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); |
1681 | } | 1681 | } |
1682 | 1682 | ||
1683 | 1683 | ||
1684 | static inline pid_t task_session_nr_ns(struct task_struct *tsk, | 1684 | static inline pid_t task_session_nr_ns(struct task_struct *tsk, |
1685 | struct pid_namespace *ns) | 1685 | struct pid_namespace *ns) |
1686 | { | 1686 | { |
1687 | return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); | 1687 | return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); |
1688 | } | 1688 | } |
1689 | 1689 | ||
1690 | static inline pid_t task_session_vnr(struct task_struct *tsk) | 1690 | static inline pid_t task_session_vnr(struct task_struct *tsk) |
1691 | { | 1691 | { |
1692 | return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); | 1692 | return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); |
1693 | } | 1693 | } |
1694 | 1694 | ||
1695 | /* obsolete, do not use */ | 1695 | /* obsolete, do not use */ |
1696 | static inline pid_t task_pgrp_nr(struct task_struct *tsk) | 1696 | static inline pid_t task_pgrp_nr(struct task_struct *tsk) |
1697 | { | 1697 | { |
1698 | return task_pgrp_nr_ns(tsk, &init_pid_ns); | 1698 | return task_pgrp_nr_ns(tsk, &init_pid_ns); |
1699 | } | 1699 | } |
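
To make the _nr/_vnr split concrete, here is a hypothetical debug helper (report_pids and its output format are illustrative only, not part of this header):

	static void report_pids(struct task_struct *tsk)
	{
		/* ids as seen from the init namespace vs. from the
		 * pid namespace of current */
		pr_info("%s: pid=%d vpid=%d tgid=%d vtgid=%d\n",
			tsk->comm,
			task_pid_nr(tsk), task_pid_vnr(tsk),
			task_tgid_nr(tsk), task_tgid_vnr(tsk));
	}
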
1700 | 1700 | ||
1701 | /** | 1701 | /** |
1702 | * pid_alive - check that a task structure is not stale | 1702 | * pid_alive - check that a task structure is not stale |
1703 | * @p: Task structure to be checked. | 1703 | * @p: Task structure to be checked. |
1704 | * | 1704 | * |
1705 | * Test if a process is not yet dead (at most zombie state). | 1705 | * Test if a process is not yet dead (at most zombie state). |
1706 | * If pid_alive fails, then pointers within the task structure | 1706 | * If pid_alive fails, then pointers within the task structure |
1707 | * can be stale and must not be dereferenced. | 1707 | * can be stale and must not be dereferenced. |
1708 | */ | 1708 | */ |
1709 | static inline int pid_alive(struct task_struct *p) | 1709 | static inline int pid_alive(struct task_struct *p) |
1710 | { | 1710 | { |
1711 | return p->pids[PIDTYPE_PID].pid != NULL; | 1711 | return p->pids[PIDTYPE_PID].pid != NULL; |
1712 | } | 1712 | } |
1713 | 1713 | ||
1714 | /** | 1714 | /** |
1715 | * is_global_init - check if a task structure is init | 1715 | * is_global_init - check if a task structure is init |
1716 | * @tsk: Task structure to be checked. | 1716 | * @tsk: Task structure to be checked. |
1717 | * | 1717 | * |
1718 | * Check if a task structure is the first user space task the kernel created. | 1718 | * Check if a task structure is the first user space task the kernel created. |
1719 | */ | 1719 | */ |
1720 | static inline int is_global_init(struct task_struct *tsk) | 1720 | static inline int is_global_init(struct task_struct *tsk) |
1721 | { | 1721 | { |
1722 | return tsk->pid == 1; | 1722 | return tsk->pid == 1; |
1723 | } | 1723 | } |
1724 | 1724 | ||
1725 | /* | 1725 | /* |
1726 | * is_container_init: | 1726 | * is_container_init: |
1727 | * check whether the task is init in its own pid namespace. | 1727 | * check whether the task is init in its own pid namespace. |
1728 | */ | 1728 | */ |
1729 | extern int is_container_init(struct task_struct *tsk); | 1729 | extern int is_container_init(struct task_struct *tsk); |
1730 | 1730 | ||
1731 | extern struct pid *cad_pid; | 1731 | extern struct pid *cad_pid; |
1732 | 1732 | ||
1733 | extern void free_task(struct task_struct *tsk); | 1733 | extern void free_task(struct task_struct *tsk); |
1734 | #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) | 1734 | #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) |
1735 | 1735 | ||
1736 | extern void __put_task_struct(struct task_struct *t); | 1736 | extern void __put_task_struct(struct task_struct *t); |
1737 | 1737 | ||
1738 | static inline void put_task_struct(struct task_struct *t) | 1738 | static inline void put_task_struct(struct task_struct *t) |
1739 | { | 1739 | { |
1740 | if (atomic_dec_and_test(&t->usage)) | 1740 | if (atomic_dec_and_test(&t->usage)) |
1741 | __put_task_struct(t); | 1741 | __put_task_struct(t); |
1742 | } | 1742 | } |
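
get_task_struct()/put_task_struct() pair up with an RCU lookup in the usual pattern: find the task under rcu_read_lock(), pin it before leaving the read-side section, put it when done. A sketch (find_task_by_vpid() is declared further down in this header; nr is an arbitrary pid in scope):

	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(nr);
	if (p)
		get_task_struct(p);	/* keep p alive past the RCU section */
	rcu_read_unlock();

	if (p) {
		/* ... safe to dereference p here ... */
		put_task_struct(p);	/* may free p via __put_task_struct() */
	}
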
1743 | 1743 | ||
1744 | extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st); | 1744 | extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st); |
1745 | extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st); | 1745 | extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st); |
1746 | 1746 | ||
1747 | /* | 1747 | /* |
1748 | * Per process flags | 1748 | * Per process flags |
1749 | */ | 1749 | */ |
1750 | #define PF_STARTING 0x00000002 /* being created */ | 1750 | #define PF_STARTING 0x00000002 /* being created */ |
1751 | #define PF_EXITING 0x00000004 /* getting shut down */ | 1751 | #define PF_EXITING 0x00000004 /* getting shut down */ |
1752 | #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ | 1752 | #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ |
1753 | #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ | 1753 | #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ |
1754 | #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ | 1754 | #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ |
1755 | #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ | 1755 | #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ |
1756 | #define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ | 1756 | #define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ |
1757 | #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ | 1757 | #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ |
1758 | #define PF_DUMPCORE 0x00000200 /* dumped core */ | 1758 | #define PF_DUMPCORE 0x00000200 /* dumped core */ |
1759 | #define PF_SIGNALED 0x00000400 /* killed by a signal */ | 1759 | #define PF_SIGNALED 0x00000400 /* killed by a signal */ |
1760 | #define PF_MEMALLOC 0x00000800 /* Allocating memory */ | 1760 | #define PF_MEMALLOC 0x00000800 /* Allocating memory */ |
1761 | #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ | 1761 | #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ |
1762 | #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */ | 1762 | #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */ |
1763 | #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ | 1763 | #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ |
1764 | #define PF_FROZEN 0x00010000 /* frozen for system suspend */ | 1764 | #define PF_FROZEN 0x00010000 /* frozen for system suspend */ |
1765 | #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ | 1765 | #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ |
1766 | #define PF_KSWAPD 0x00040000 /* I am kswapd */ | 1766 | #define PF_KSWAPD 0x00040000 /* I am kswapd */ |
1767 | #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ | 1767 | #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ |
1768 | #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ | 1768 | #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ |
1769 | #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ | 1769 | #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ |
1770 | #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ | 1770 | #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ |
1771 | #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ | 1771 | #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ |
1772 | #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ | 1772 | #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ |
1773 | #define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */ | 1773 | #define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */ |
1774 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ | 1774 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ |
1775 | #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ | 1775 | #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ |
1776 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ | 1776 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ |
1777 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ | 1777 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ |
1778 | #define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */ | 1778 | #define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */ |
1779 | 1779 | ||
1780 | /* | 1780 | /* |
1781 | * Only the _current_ task can read/write to tsk->flags, but other | 1781 | * Only the _current_ task can read/write to tsk->flags, but other |
1782 | * tasks can access tsk->flags in read-only mode, for example | 1782 | * tasks can access tsk->flags in read-only mode, for example |
1783 | * with tsk_used_math (like during threaded core dumping). | 1783 | * with tsk_used_math (like during threaded core dumping). |
1784 | * There is however an exception to this rule during ptrace | 1784 | * There is however an exception to this rule during ptrace |
1785 | * or during fork: the ptracer task is allowed to write to the | 1785 | * or during fork: the ptracer task is allowed to write to the |
1786 | * child->flags of its traced child (same goes for fork, the parent | 1786 | * child->flags of its traced child (same goes for fork, the parent |
1787 | * can write to the child->flags), because we're guaranteed the | 1787 | * can write to the child->flags), because we're guaranteed the |
1788 | * child is not running and in turn not changing child->flags | 1788 | * child is not running and in turn not changing child->flags |
1789 | * at the same time the parent does it. | 1789 | * at the same time the parent does it. |
1790 | */ | 1790 | */ |
1791 | #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) | 1791 | #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) |
1792 | #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) | 1792 | #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) |
1793 | #define clear_used_math() clear_stopped_child_used_math(current) | 1793 | #define clear_used_math() clear_stopped_child_used_math(current) |
1794 | #define set_used_math() set_stopped_child_used_math(current) | 1794 | #define set_used_math() set_stopped_child_used_math(current) |
1795 | #define conditional_stopped_child_used_math(condition, child) \ | 1795 | #define conditional_stopped_child_used_math(condition, child) \ |
1796 | do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) | 1796 | do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) |
1797 | #define conditional_used_math(condition) \ | 1797 | #define conditional_used_math(condition) \ |
1798 | conditional_stopped_child_used_math(condition, current) | 1798 | conditional_stopped_child_used_math(condition, current) |
1799 | #define copy_to_stopped_child_used_math(child) \ | 1799 | #define copy_to_stopped_child_used_math(child) \ |
1800 | do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) | 1800 | do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) |
1801 | /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ | 1801 | /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ |
1802 | #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) | 1802 | #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) |
1803 | #define used_math() tsk_used_math(current) | 1803 | #define used_math() tsk_used_math(current) |
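
One concrete consequence of the current-only rule above: flags are commonly toggled on current around a critical region with no locking at all. Reclaim-related paths bracket allocations with PF_MEMALLOC roughly like this (a sketch of the pattern, not a quote of any particular call site):

	current->flags |= PF_MEMALLOC;	/* only current writes its own flags */
	/* ... allocate memory that must not recurse into reclaim ... */
	current->flags &= ~PF_MEMALLOC;
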
1804 | 1804 | ||
1805 | /* | 1805 | /* |
1806 | * task->jobctl flags | 1806 | * task->jobctl flags |
1807 | */ | 1807 | */ |
1808 | #define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */ | 1808 | #define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */ |
1809 | 1809 | ||
1810 | #define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */ | 1810 | #define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */ |
1811 | #define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */ | 1811 | #define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */ |
1812 | #define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */ | 1812 | #define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */ |
1813 | #define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */ | 1813 | #define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */ |
1814 | #define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ | 1814 | #define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ |
1815 | #define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ | 1815 | #define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ |
1816 | #define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ | 1816 | #define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ |
1817 | 1817 | ||
1818 | #define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT) | 1818 | #define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT) |
1819 | #define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT) | 1819 | #define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT) |
1820 | #define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT) | 1820 | #define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT) |
1821 | #define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT) | 1821 | #define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT) |
1822 | #define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT) | 1822 | #define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT) |
1823 | #define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT) | 1823 | #define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT) |
1824 | #define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT) | 1824 | #define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT) |
1825 | 1825 | ||
1826 | #define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) | 1826 | #define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) |
1827 | #define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) | 1827 | #define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) |
1828 | 1828 | ||
1829 | extern bool task_set_jobctl_pending(struct task_struct *task, | 1829 | extern bool task_set_jobctl_pending(struct task_struct *task, |
1830 | unsigned int mask); | 1830 | unsigned int mask); |
1831 | extern void task_clear_jobctl_trapping(struct task_struct *task); | 1831 | extern void task_clear_jobctl_trapping(struct task_struct *task); |
1832 | extern void task_clear_jobctl_pending(struct task_struct *task, | 1832 | extern void task_clear_jobctl_pending(struct task_struct *task, |
1833 | unsigned int mask); | 1833 | unsigned int mask); |
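
The jobctl bits are protected by the task's siglock, so tests and updates look roughly like the sketch below (the surrounding policy is hypothetical):

	spin_lock_irq(&task->sighand->siglock);
	if (task->jobctl & JOBCTL_STOP_PENDING)
		/* a group stop is pending for this task; consume it */
		task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
	spin_unlock_irq(&task->sighand->siglock);
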
1834 | 1834 | ||
1835 | #ifdef CONFIG_PREEMPT_RCU | 1835 | #ifdef CONFIG_PREEMPT_RCU |
1836 | 1836 | ||
1837 | #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ | 1837 | #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ |
1838 | #define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */ | 1838 | #define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */ |
1839 | #define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */ | 1839 | #define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */ |
1840 | 1840 | ||
1841 | static inline void rcu_copy_process(struct task_struct *p) | 1841 | static inline void rcu_copy_process(struct task_struct *p) |
1842 | { | 1842 | { |
1843 | p->rcu_read_lock_nesting = 0; | 1843 | p->rcu_read_lock_nesting = 0; |
1844 | p->rcu_read_unlock_special = 0; | 1844 | p->rcu_read_unlock_special = 0; |
1845 | #ifdef CONFIG_TREE_PREEMPT_RCU | 1845 | #ifdef CONFIG_TREE_PREEMPT_RCU |
1846 | p->rcu_blocked_node = NULL; | 1846 | p->rcu_blocked_node = NULL; |
1847 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | 1847 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
1848 | #ifdef CONFIG_RCU_BOOST | 1848 | #ifdef CONFIG_RCU_BOOST |
1849 | p->rcu_boost_mutex = NULL; | 1849 | p->rcu_boost_mutex = NULL; |
1850 | #endif /* #ifdef CONFIG_RCU_BOOST */ | 1850 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
1851 | INIT_LIST_HEAD(&p->rcu_node_entry); | 1851 | INIT_LIST_HEAD(&p->rcu_node_entry); |
1852 | } | 1852 | } |
1853 | 1853 | ||
1854 | #else | 1854 | #else |
1855 | 1855 | ||
1856 | static inline void rcu_copy_process(struct task_struct *p) | 1856 | static inline void rcu_copy_process(struct task_struct *p) |
1857 | { | 1857 | { |
1858 | } | 1858 | } |
1859 | 1859 | ||
1860 | #endif | 1860 | #endif |
1861 | 1861 | ||
1862 | #ifdef CONFIG_SMP | 1862 | #ifdef CONFIG_SMP |
1863 | extern void do_set_cpus_allowed(struct task_struct *p, | 1863 | extern void do_set_cpus_allowed(struct task_struct *p, |
1864 | const struct cpumask *new_mask); | 1864 | const struct cpumask *new_mask); |
1865 | 1865 | ||
1866 | extern int set_cpus_allowed_ptr(struct task_struct *p, | 1866 | extern int set_cpus_allowed_ptr(struct task_struct *p, |
1867 | const struct cpumask *new_mask); | 1867 | const struct cpumask *new_mask); |
1868 | #else | 1868 | #else |
1869 | static inline void do_set_cpus_allowed(struct task_struct *p, | 1869 | static inline void do_set_cpus_allowed(struct task_struct *p, |
1870 | const struct cpumask *new_mask) | 1870 | const struct cpumask *new_mask) |
1871 | { | 1871 | { |
1872 | } | 1872 | } |
1873 | static inline int set_cpus_allowed_ptr(struct task_struct *p, | 1873 | static inline int set_cpus_allowed_ptr(struct task_struct *p, |
1874 | const struct cpumask *new_mask) | 1874 | const struct cpumask *new_mask) |
1875 | { | 1875 | { |
1876 | if (!cpumask_test_cpu(0, new_mask)) | 1876 | if (!cpumask_test_cpu(0, new_mask)) |
1877 | return -EINVAL; | 1877 | return -EINVAL; |
1878 | return 0; | 1878 | return 0; |
1879 | } | 1879 | } |
1880 | #endif | 1880 | #endif |
1881 | 1881 | ||
1882 | #ifndef CONFIG_CPUMASK_OFFSTACK | 1882 | #ifndef CONFIG_CPUMASK_OFFSTACK |
1883 | static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) | 1883 | static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) |
1884 | { | 1884 | { |
1885 | return set_cpus_allowed_ptr(p, &new_mask); | 1885 | return set_cpus_allowed_ptr(p, &new_mask); |
1886 | } | 1886 | } |
1887 | #endif | 1887 | #endif |
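
Typical use from code managing kernel threads, sketched under the assumption that the target CPU is online (note the !SMP stub above only accepts masks containing CPU 0):

	int err;

	err = set_cpus_allowed_ptr(tsk, cpumask_of(1));	/* pin tsk to CPU 1 */
	if (err)
		pr_warn("could not pin task: %d\n", err);
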
1888 | 1888 | ||
1889 | /* | 1889 | /* |
1890 | * Do not use outside of architecture code which knows its limitations. | 1890 | * Do not use outside of architecture code which knows its limitations. |
1891 | * | 1891 | * |
1892 | * sched_clock() has no promise of monotonicity or bounded drift between | 1892 | * sched_clock() has no promise of monotonicity or bounded drift between |
1893 | * CPUs; its use (which you should not attempt) requires disabling IRQs. | 1893 | * CPUs; its use (which you should not attempt) requires disabling IRQs. |
1894 | * | 1894 | * |
1895 | * Please use one of the three interfaces below. | 1895 | * Please use one of the three interfaces below. |
1896 | */ | 1896 | */ |
1897 | extern unsigned long long notrace sched_clock(void); | 1897 | extern unsigned long long notrace sched_clock(void); |
1898 | /* | 1898 | /* |
1899 | * See the comment in kernel/sched_clock.c | 1899 | * See the comment in kernel/sched_clock.c |
1900 | */ | 1900 | */ |
1901 | extern u64 cpu_clock(int cpu); | 1901 | extern u64 cpu_clock(int cpu); |
1902 | extern u64 local_clock(void); | 1902 | extern u64 local_clock(void); |
1903 | extern u64 sched_clock_cpu(int cpu); | 1903 | extern u64 sched_clock_cpu(int cpu); |
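
Of the three, local_clock() is the usual choice for cheap, locally consistent timestamps. Timing a section of code with it, as a sketch:

	u64 t0, delta_ns;

	t0 = local_clock();
	/* ... work being timed ... */
	delta_ns = local_clock() - t0;	/* nanoseconds; meaningful on one CPU,
					 * may jump if the task migrated */
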
1904 | 1904 | ||
1905 | 1905 | ||
1906 | extern void sched_clock_init(void); | 1906 | extern void sched_clock_init(void); |
1907 | 1907 | ||
1908 | #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK | 1908 | #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK |
1909 | static inline void sched_clock_tick(void) | 1909 | static inline void sched_clock_tick(void) |
1910 | { | 1910 | { |
1911 | } | 1911 | } |
1912 | 1912 | ||
1913 | static inline void sched_clock_idle_sleep_event(void) | 1913 | static inline void sched_clock_idle_sleep_event(void) |
1914 | { | 1914 | { |
1915 | } | 1915 | } |
1916 | 1916 | ||
1917 | static inline void sched_clock_idle_wakeup_event(u64 delta_ns) | 1917 | static inline void sched_clock_idle_wakeup_event(u64 delta_ns) |
1918 | { | 1918 | { |
1919 | } | 1919 | } |
1920 | #else | 1920 | #else |
1921 | /* | 1921 | /* |
1922 | * Architectures can set this to 1 if they have specified | 1922 | * Architectures can set this to 1 if they have specified |
1923 | * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, | 1923 | * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, |
1924 | * but then during bootup it turns out that sched_clock() | 1924 | * but then during bootup it turns out that sched_clock() |
1925 | * is reliable after all: | 1925 | * is reliable after all: |
1926 | */ | 1926 | */ |
1927 | extern int sched_clock_stable; | 1927 | extern int sched_clock_stable; |
1928 | 1928 | ||
1929 | extern void sched_clock_tick(void); | 1929 | extern void sched_clock_tick(void); |
1930 | extern void sched_clock_idle_sleep_event(void); | 1930 | extern void sched_clock_idle_sleep_event(void); |
1931 | extern void sched_clock_idle_wakeup_event(u64 delta_ns); | 1931 | extern void sched_clock_idle_wakeup_event(u64 delta_ns); |
1932 | #endif | 1932 | #endif |
1933 | 1933 | ||
1934 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING | 1934 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
1935 | /* | 1935 | /* |
1936 | * An i/f to runtime opt-in for irq time accounting based off of sched_clock. | 1936 | * An i/f to runtime opt-in for irq time accounting based off of sched_clock. |
1937 | * The reason for this explicit opt-in is not to have perf penalty with | 1937 | * The reason for this explicit opt-in is not to have perf penalty with |
1938 | * slow sched_clocks. | 1938 | * slow sched_clocks. |
1939 | */ | 1939 | */ |
1940 | extern void enable_sched_clock_irqtime(void); | 1940 | extern void enable_sched_clock_irqtime(void); |
1941 | extern void disable_sched_clock_irqtime(void); | 1941 | extern void disable_sched_clock_irqtime(void); |
1942 | #else | 1942 | #else |
1943 | static inline void enable_sched_clock_irqtime(void) {} | 1943 | static inline void enable_sched_clock_irqtime(void) {} |
1944 | static inline void disable_sched_clock_irqtime(void) {} | 1944 | static inline void disable_sched_clock_irqtime(void) {} |
1945 | #endif | 1945 | #endif |
1946 | 1946 | ||
1947 | extern unsigned long long | 1947 | extern unsigned long long |
1948 | task_sched_runtime(struct task_struct *task); | 1948 | task_sched_runtime(struct task_struct *task); |
1949 | extern unsigned long long thread_group_sched_runtime(struct task_struct *task); | 1949 | extern unsigned long long thread_group_sched_runtime(struct task_struct *task); |
1950 | 1950 | ||
1951 | /* sched_exec is called by processes performing an exec */ | 1951 | /* sched_exec is called by processes performing an exec */ |
1952 | #ifdef CONFIG_SMP | 1952 | #ifdef CONFIG_SMP |
1953 | extern void sched_exec(void); | 1953 | extern void sched_exec(void); |
1954 | #else | 1954 | #else |
1955 | #define sched_exec() {} | 1955 | #define sched_exec() {} |
1956 | #endif | 1956 | #endif |
1957 | 1957 | ||
1958 | extern void sched_clock_idle_sleep_event(void); | 1958 | extern void sched_clock_idle_sleep_event(void); |
1959 | extern void sched_clock_idle_wakeup_event(u64 delta_ns); | 1959 | extern void sched_clock_idle_wakeup_event(u64 delta_ns); |
1960 | 1960 | ||
1961 | #ifdef CONFIG_HOTPLUG_CPU | 1961 | #ifdef CONFIG_HOTPLUG_CPU |
1962 | extern void idle_task_exit(void); | 1962 | extern void idle_task_exit(void); |
1963 | #else | 1963 | #else |
1964 | static inline void idle_task_exit(void) {} | 1964 | static inline void idle_task_exit(void) {} |
1965 | #endif | 1965 | #endif |
1966 | 1966 | ||
1967 | #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP) | 1967 | #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP) |
1968 | extern void wake_up_idle_cpu(int cpu); | 1968 | extern void wake_up_idle_cpu(int cpu); |
1969 | #else | 1969 | #else |
1970 | static inline void wake_up_idle_cpu(int cpu) { } | 1970 | static inline void wake_up_idle_cpu(int cpu) { } |
1971 | #endif | 1971 | #endif |
1972 | 1972 | ||
1973 | extern unsigned int sysctl_sched_latency; | 1973 | extern unsigned int sysctl_sched_latency; |
1974 | extern unsigned int sysctl_sched_min_granularity; | 1974 | extern unsigned int sysctl_sched_min_granularity; |
1975 | extern unsigned int sysctl_sched_wakeup_granularity; | 1975 | extern unsigned int sysctl_sched_wakeup_granularity; |
1976 | extern unsigned int sysctl_sched_child_runs_first; | 1976 | extern unsigned int sysctl_sched_child_runs_first; |
1977 | 1977 | ||
1978 | enum sched_tunable_scaling { | 1978 | enum sched_tunable_scaling { |
1979 | SCHED_TUNABLESCALING_NONE, | 1979 | SCHED_TUNABLESCALING_NONE, |
1980 | SCHED_TUNABLESCALING_LOG, | 1980 | SCHED_TUNABLESCALING_LOG, |
1981 | SCHED_TUNABLESCALING_LINEAR, | 1981 | SCHED_TUNABLESCALING_LINEAR, |
1982 | SCHED_TUNABLESCALING_END, | 1982 | SCHED_TUNABLESCALING_END, |
1983 | }; | 1983 | }; |
1984 | extern enum sched_tunable_scaling sysctl_sched_tunable_scaling; | 1984 | extern enum sched_tunable_scaling sysctl_sched_tunable_scaling; |
1985 | 1985 | ||
1986 | #ifdef CONFIG_SCHED_DEBUG | 1986 | #ifdef CONFIG_SCHED_DEBUG |
1987 | extern unsigned int sysctl_sched_migration_cost; | 1987 | extern unsigned int sysctl_sched_migration_cost; |
1988 | extern unsigned int sysctl_sched_nr_migrate; | 1988 | extern unsigned int sysctl_sched_nr_migrate; |
1989 | extern unsigned int sysctl_sched_time_avg; | 1989 | extern unsigned int sysctl_sched_time_avg; |
1990 | extern unsigned int sysctl_timer_migration; | 1990 | extern unsigned int sysctl_timer_migration; |
1991 | extern unsigned int sysctl_sched_shares_window; | 1991 | extern unsigned int sysctl_sched_shares_window; |
1992 | 1992 | ||
1993 | int sched_proc_update_handler(struct ctl_table *table, int write, | 1993 | int sched_proc_update_handler(struct ctl_table *table, int write, |
1994 | void __user *buffer, size_t *length, | 1994 | void __user *buffer, size_t *length, |
1995 | loff_t *ppos); | 1995 | loff_t *ppos); |
1996 | #endif | 1996 | #endif |
1997 | #ifdef CONFIG_SCHED_DEBUG | 1997 | #ifdef CONFIG_SCHED_DEBUG |
1998 | static inline unsigned int get_sysctl_timer_migration(void) | 1998 | static inline unsigned int get_sysctl_timer_migration(void) |
1999 | { | 1999 | { |
2000 | return sysctl_timer_migration; | 2000 | return sysctl_timer_migration; |
2001 | } | 2001 | } |
2002 | #else | 2002 | #else |
2003 | static inline unsigned int get_sysctl_timer_migration(void) | 2003 | static inline unsigned int get_sysctl_timer_migration(void) |
2004 | { | 2004 | { |
2005 | return 1; | 2005 | return 1; |
2006 | } | 2006 | } |
2007 | #endif | 2007 | #endif |
2008 | extern unsigned int sysctl_sched_rt_period; | 2008 | extern unsigned int sysctl_sched_rt_period; |
2009 | extern int sysctl_sched_rt_runtime; | 2009 | extern int sysctl_sched_rt_runtime; |
2010 | 2010 | ||
2011 | int sched_rt_handler(struct ctl_table *table, int write, | 2011 | int sched_rt_handler(struct ctl_table *table, int write, |
2012 | void __user *buffer, size_t *lenp, | 2012 | void __user *buffer, size_t *lenp, |
2013 | loff_t *ppos); | 2013 | loff_t *ppos); |
2014 | 2014 | ||
2015 | #ifdef CONFIG_SCHED_AUTOGROUP | 2015 | #ifdef CONFIG_SCHED_AUTOGROUP |
2016 | extern unsigned int sysctl_sched_autogroup_enabled; | 2016 | extern unsigned int sysctl_sched_autogroup_enabled; |
2017 | 2017 | ||
2018 | extern void sched_autogroup_create_attach(struct task_struct *p); | 2018 | extern void sched_autogroup_create_attach(struct task_struct *p); |
2019 | extern void sched_autogroup_detach(struct task_struct *p); | 2019 | extern void sched_autogroup_detach(struct task_struct *p); |
2020 | extern void sched_autogroup_fork(struct signal_struct *sig); | 2020 | extern void sched_autogroup_fork(struct signal_struct *sig); |
2021 | extern void sched_autogroup_exit(struct signal_struct *sig); | 2021 | extern void sched_autogroup_exit(struct signal_struct *sig); |
2022 | #ifdef CONFIG_PROC_FS | 2022 | #ifdef CONFIG_PROC_FS |
2023 | extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); | 2023 | extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); |
2024 | extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice); | 2024 | extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice); |
2025 | #endif | 2025 | #endif |
2026 | #else | 2026 | #else |
2027 | static inline void sched_autogroup_create_attach(struct task_struct *p) { } | 2027 | static inline void sched_autogroup_create_attach(struct task_struct *p) { } |
2028 | static inline void sched_autogroup_detach(struct task_struct *p) { } | 2028 | static inline void sched_autogroup_detach(struct task_struct *p) { } |
2029 | static inline void sched_autogroup_fork(struct signal_struct *sig) { } | 2029 | static inline void sched_autogroup_fork(struct signal_struct *sig) { } |
2030 | static inline void sched_autogroup_exit(struct signal_struct *sig) { } | 2030 | static inline void sched_autogroup_exit(struct signal_struct *sig) { } |
2031 | #endif | 2031 | #endif |
2032 | 2032 | ||
2033 | #ifdef CONFIG_RT_MUTEXES | 2033 | #ifdef CONFIG_RT_MUTEXES |
2034 | extern int rt_mutex_getprio(struct task_struct *p); | 2034 | extern int rt_mutex_getprio(struct task_struct *p); |
2035 | extern void rt_mutex_setprio(struct task_struct *p, int prio); | 2035 | extern void rt_mutex_setprio(struct task_struct *p, int prio); |
2036 | extern void rt_mutex_adjust_pi(struct task_struct *p); | 2036 | extern void rt_mutex_adjust_pi(struct task_struct *p); |
2037 | #else | 2037 | #else |
2038 | static inline int rt_mutex_getprio(struct task_struct *p) | 2038 | static inline int rt_mutex_getprio(struct task_struct *p) |
2039 | { | 2039 | { |
2040 | return p->normal_prio; | 2040 | return p->normal_prio; |
2041 | } | 2041 | } |
2042 | # define rt_mutex_adjust_pi(p) do { } while (0) | 2042 | # define rt_mutex_adjust_pi(p) do { } while (0) |
2043 | #endif | 2043 | #endif |
2044 | 2044 | ||
2045 | extern bool yield_to(struct task_struct *p, bool preempt); | 2045 | extern bool yield_to(struct task_struct *p, bool preempt); |
2046 | extern void set_user_nice(struct task_struct *p, long nice); | 2046 | extern void set_user_nice(struct task_struct *p, long nice); |
2047 | extern int task_prio(const struct task_struct *p); | 2047 | extern int task_prio(const struct task_struct *p); |
2048 | extern int task_nice(const struct task_struct *p); | 2048 | extern int task_nice(const struct task_struct *p); |
2049 | extern int can_nice(const struct task_struct *p, const int nice); | 2049 | extern int can_nice(const struct task_struct *p, const int nice); |
2050 | extern int task_curr(const struct task_struct *p); | 2050 | extern int task_curr(const struct task_struct *p); |
2051 | extern int idle_cpu(int cpu); | 2051 | extern int idle_cpu(int cpu); |
2052 | extern int sched_setscheduler(struct task_struct *, int, | 2052 | extern int sched_setscheduler(struct task_struct *, int, |
2053 | const struct sched_param *); | 2053 | const struct sched_param *); |
2054 | extern int sched_setscheduler_nocheck(struct task_struct *, int, | 2054 | extern int sched_setscheduler_nocheck(struct task_struct *, int, |
2055 | const struct sched_param *); | 2055 | const struct sched_param *); |
2056 | extern struct task_struct *idle_task(int cpu); | 2056 | extern struct task_struct *idle_task(int cpu); |
2057 | extern struct task_struct *curr_task(int cpu); | 2057 | extern struct task_struct *curr_task(int cpu); |
2058 | extern void set_curr_task(int cpu, struct task_struct *p); | 2058 | extern void set_curr_task(int cpu, struct task_struct *p); |
2059 | 2059 | ||
2060 | void yield(void); | 2060 | void yield(void); |
2061 | 2061 | ||
2062 | /* | 2062 | /* |
2063 | * The default (Linux) execution domain. | 2063 | * The default (Linux) execution domain. |
2064 | */ | 2064 | */ |
2065 | extern struct exec_domain default_exec_domain; | 2065 | extern struct exec_domain default_exec_domain; |
2066 | 2066 | ||
2067 | union thread_union { | 2067 | union thread_union { |
2068 | struct thread_info thread_info; | 2068 | struct thread_info thread_info; |
2069 | unsigned long stack[THREAD_SIZE/sizeof(long)]; | 2069 | unsigned long stack[THREAD_SIZE/sizeof(long)]; |
2070 | }; | 2070 | }; |
2071 | 2071 | ||
2072 | #ifndef __HAVE_ARCH_KSTACK_END | 2072 | #ifndef __HAVE_ARCH_KSTACK_END |
2073 | static inline int kstack_end(void *addr) | 2073 | static inline int kstack_end(void *addr) |
2074 | { | 2074 | { |
2075 | /* Reliable end of stack detection: | 2075 | /* Reliable end of stack detection: |
2076 | * Some APM BIOS versions misalign the stack | 2076 | * Some APM BIOS versions misalign the stack |
2077 | */ | 2077 | */ |
2078 | return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); | 2078 | return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*))); |
2079 | } | 2079 | } |
2080 | #endif | 2080 | #endif |
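
kstack_end() is the loop bound the generic stack dumpers use when scanning a kernel stack for plausible return addresses. A simplified sketch of that pattern (stack is assumed to point somewhere into the thread's kernel stack):

	while (!kstack_end(stack)) {
		unsigned long addr = *stack++;

		if (__kernel_text_address(addr))
			printk("%pS\n", (void *)addr);	/* looks like code */
	}
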
2081 | 2081 | ||
2082 | extern union thread_union init_thread_union; | 2082 | extern union thread_union init_thread_union; |
2083 | extern struct task_struct init_task; | 2083 | extern struct task_struct init_task; |
2084 | 2084 | ||
2085 | extern struct mm_struct init_mm; | 2085 | extern struct mm_struct init_mm; |
2086 | 2086 | ||
2087 | extern struct pid_namespace init_pid_ns; | 2087 | extern struct pid_namespace init_pid_ns; |
2088 | 2088 | ||
2089 | /* | 2089 | /* |
2090 | * find a task by one of its numerical ids | 2090 | * find a task by one of its numerical ids |
2091 | * | 2091 | * |
2092 | * find_task_by_pid_ns(): | 2092 | * find_task_by_pid_ns(): |
2093 | * finds a task by its pid in the specified namespace | 2093 | * finds a task by its pid in the specified namespace |
2094 | * find_task_by_vpid(): | 2094 | * find_task_by_vpid(): |
2095 | * finds a task by its virtual pid | 2095 | * finds a task by its virtual pid |
2096 | * | 2096 | * |
2097 | * see also find_vpid() etc in include/linux/pid.h | 2097 | * see also find_vpid() etc in include/linux/pid.h |
2098 | */ | 2098 | */ |
2099 | 2099 | ||
2100 | extern struct task_struct *find_task_by_vpid(pid_t nr); | 2100 | extern struct task_struct *find_task_by_vpid(pid_t nr); |
2101 | extern struct task_struct *find_task_by_pid_ns(pid_t nr, | 2101 | extern struct task_struct *find_task_by_pid_ns(pid_t nr, |
2102 | struct pid_namespace *ns); | 2102 | struct pid_namespace *ns); |
2103 | 2103 | ||
2104 | extern void __set_special_pids(struct pid *pid); | 2104 | extern void __set_special_pids(struct pid *pid); |
2105 | 2105 | ||
2106 | /* per-UID process charging. */ | 2106 | /* per-UID process charging. */ |
2107 | extern struct user_struct * alloc_uid(struct user_namespace *, uid_t); | 2107 | extern struct user_struct * alloc_uid(struct user_namespace *, uid_t); |
2108 | static inline struct user_struct *get_uid(struct user_struct *u) | 2108 | static inline struct user_struct *get_uid(struct user_struct *u) |
2109 | { | 2109 | { |
2110 | atomic_inc(&u->__count); | 2110 | atomic_inc(&u->__count); |
2111 | return u; | 2111 | return u; |
2112 | } | 2112 | } |
2113 | extern void free_uid(struct user_struct *); | 2113 | extern void free_uid(struct user_struct *); |
2114 | extern void release_uids(struct user_namespace *ns); | 2114 | extern void release_uids(struct user_namespace *ns); |
2115 | 2115 | ||
2116 | #include <asm/current.h> | 2116 | #include <asm/current.h> |
2117 | 2117 | ||
2118 | extern void xtime_update(unsigned long ticks); | 2118 | extern void xtime_update(unsigned long ticks); |
2119 | 2119 | ||
2120 | extern int wake_up_state(struct task_struct *tsk, unsigned int state); | 2120 | extern int wake_up_state(struct task_struct *tsk, unsigned int state); |
2121 | extern int wake_up_process(struct task_struct *tsk); | 2121 | extern int wake_up_process(struct task_struct *tsk); |
2122 | extern void wake_up_new_task(struct task_struct *tsk); | 2122 | extern void wake_up_new_task(struct task_struct *tsk); |
2123 | #ifdef CONFIG_SMP | 2123 | #ifdef CONFIG_SMP |
2124 | extern void kick_process(struct task_struct *tsk); | 2124 | extern void kick_process(struct task_struct *tsk); |
2125 | #else | 2125 | #else |
2126 | static inline void kick_process(struct task_struct *tsk) { } | 2126 | static inline void kick_process(struct task_struct *tsk) { } |
2127 | #endif | 2127 | #endif |
2128 | extern void sched_fork(struct task_struct *p); | 2128 | extern void sched_fork(struct task_struct *p); |
2129 | extern void sched_dead(struct task_struct *p); | 2129 | extern void sched_dead(struct task_struct *p); |
2130 | 2130 | ||
2131 | extern void proc_caches_init(void); | 2131 | extern void proc_caches_init(void); |
2132 | extern void flush_signals(struct task_struct *); | 2132 | extern void flush_signals(struct task_struct *); |
2133 | extern void __flush_signals(struct task_struct *); | 2133 | extern void __flush_signals(struct task_struct *); |
2134 | extern void ignore_signals(struct task_struct *); | 2134 | extern void ignore_signals(struct task_struct *); |
2135 | extern void flush_signal_handlers(struct task_struct *, int force_default); | 2135 | extern void flush_signal_handlers(struct task_struct *, int force_default); |
2136 | extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); | 2136 | extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); |
2137 | 2137 | ||
2138 | static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) | 2138 | static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) |
2139 | { | 2139 | { |
2140 | unsigned long flags; | 2140 | unsigned long flags; |
2141 | int ret; | 2141 | int ret; |
2142 | 2142 | ||
2143 | spin_lock_irqsave(&tsk->sighand->siglock, flags); | 2143 | spin_lock_irqsave(&tsk->sighand->siglock, flags); |
2144 | ret = dequeue_signal(tsk, mask, info); | 2144 | ret = dequeue_signal(tsk, mask, info); |
2145 | spin_unlock_irqrestore(&tsk->sighand->siglock, flags); | 2145 | spin_unlock_irqrestore(&tsk->sighand->siglock, flags); |
2146 | 2146 | ||
2147 | return ret; | 2147 | return ret; |
2148 | } | 2148 | } |
2149 | 2149 | ||
2150 | extern void block_all_signals(int (*notifier)(void *priv), void *priv, | 2150 | extern void block_all_signals(int (*notifier)(void *priv), void *priv, |
2151 | sigset_t *mask); | 2151 | sigset_t *mask); |
2152 | extern void unblock_all_signals(void); | 2152 | extern void unblock_all_signals(void); |
2153 | extern void release_task(struct task_struct * p); | 2153 | extern void release_task(struct task_struct * p); |
2154 | extern int send_sig_info(int, struct siginfo *, struct task_struct *); | 2154 | extern int send_sig_info(int, struct siginfo *, struct task_struct *); |
2155 | extern int force_sigsegv(int, struct task_struct *); | 2155 | extern int force_sigsegv(int, struct task_struct *); |
2156 | extern int force_sig_info(int, struct siginfo *, struct task_struct *); | 2156 | extern int force_sig_info(int, struct siginfo *, struct task_struct *); |
2157 | extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); | 2157 | extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); |
2158 | extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); | 2158 | extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); |
2159 | extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32); | 2159 | extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32); |
2160 | extern int kill_pgrp(struct pid *pid, int sig, int priv); | 2160 | extern int kill_pgrp(struct pid *pid, int sig, int priv); |
2161 | extern int kill_pid(struct pid *pid, int sig, int priv); | 2161 | extern int kill_pid(struct pid *pid, int sig, int priv); |
2162 | extern int kill_proc_info(int, struct siginfo *, pid_t); | 2162 | extern int kill_proc_info(int, struct siginfo *, pid_t); |
2163 | extern int do_notify_parent(struct task_struct *, int); | 2163 | extern bool do_notify_parent(struct task_struct *, int); |
2164 | extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); | 2164 | extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); |
2165 | extern void force_sig(int, struct task_struct *); | 2165 | extern void force_sig(int, struct task_struct *); |
2166 | extern int send_sig(int, struct task_struct *, int); | 2166 | extern int send_sig(int, struct task_struct *, int); |
2167 | extern int zap_other_threads(struct task_struct *p); | 2167 | extern int zap_other_threads(struct task_struct *p); |
2168 | extern struct sigqueue *sigqueue_alloc(void); | 2168 | extern struct sigqueue *sigqueue_alloc(void); |
2169 | extern void sigqueue_free(struct sigqueue *); | 2169 | extern void sigqueue_free(struct sigqueue *); |
2170 | extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); | 2170 | extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); |
2171 | extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); | 2171 | extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); |
2172 | extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long); | 2172 | extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long); |
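
The boolean returned by do_notify_parent() (switched from int by this commit) tells the caller that the child should be self-reaped because its parent ignores SIGCHLD. Per the commit message, exit_notify() is the only caller that checks it; its intended shape, sketched with an illustrative variable name:

	bool autoreap;

	autoreap = do_notify_parent(tsk, tsk->exit_signal);
	tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
	if (autoreap)
		release_task(tsk);	/* nobody will wait for this task */
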
2173 | 2173 | ||
2174 | static inline int kill_cad_pid(int sig, int priv) | 2174 | static inline int kill_cad_pid(int sig, int priv) |
2175 | { | 2175 | { |
2176 | return kill_pid(cad_pid, sig, priv); | 2176 | return kill_pid(cad_pid, sig, priv); |
2177 | } | 2177 | } |
2178 | 2178 | ||
2179 | /* These can be the second arg to send_sig_info/send_group_sig_info. */ | 2179 | /* These can be the second arg to send_sig_info/send_group_sig_info. */ |
2180 | #define SEND_SIG_NOINFO ((struct siginfo *) 0) | 2180 | #define SEND_SIG_NOINFO ((struct siginfo *) 0) |
2181 | #define SEND_SIG_PRIV ((struct siginfo *) 1) | 2181 | #define SEND_SIG_PRIV ((struct siginfo *) 1) |
2182 | #define SEND_SIG_FORCED ((struct siginfo *) 2) | 2182 | #define SEND_SIG_FORCED ((struct siginfo *) 2) |
2183 | 2183 | ||
2184 | /* | 2184 | /* |
2185 | * True if we are on the alternate signal stack. | 2185 | * True if we are on the alternate signal stack. |
2186 | */ | 2186 | */ |
2187 | static inline int on_sig_stack(unsigned long sp) | 2187 | static inline int on_sig_stack(unsigned long sp) |
2188 | { | 2188 | { |
2189 | #ifdef CONFIG_STACK_GROWSUP | 2189 | #ifdef CONFIG_STACK_GROWSUP |
2190 | return sp >= current->sas_ss_sp && | 2190 | return sp >= current->sas_ss_sp && |
2191 | sp - current->sas_ss_sp < current->sas_ss_size; | 2191 | sp - current->sas_ss_sp < current->sas_ss_size; |
2192 | #else | 2192 | #else |
2193 | return sp > current->sas_ss_sp && | 2193 | return sp > current->sas_ss_sp && |
2194 | sp - current->sas_ss_sp <= current->sas_ss_size; | 2194 | sp - current->sas_ss_sp <= current->sas_ss_size; |
2195 | #endif | 2195 | #endif |
2196 | } | 2196 | } |
2197 | 2197 | ||
2198 | static inline int sas_ss_flags(unsigned long sp) | 2198 | static inline int sas_ss_flags(unsigned long sp) |
2199 | { | 2199 | { |
2200 | return (current->sas_ss_size == 0 ? SS_DISABLE | 2200 | return (current->sas_ss_size == 0 ? SS_DISABLE |
2201 | : on_sig_stack(sp) ? SS_ONSTACK : 0); | 2201 | : on_sig_stack(sp) ? SS_ONSTACK : 0); |
2202 | } | 2202 | } |
2203 | 2203 | ||
2204 | /* | 2204 | /* |
2205 | * Routines for handling mm_structs | 2205 | * Routines for handling mm_structs |
2206 | */ | 2206 | */ |
2207 | extern struct mm_struct * mm_alloc(void); | 2207 | extern struct mm_struct * mm_alloc(void); |
2208 | 2208 | ||
2209 | /* mmdrop drops the mm and the page tables */ | 2209 | /* mmdrop drops the mm and the page tables */ |
2210 | extern void __mmdrop(struct mm_struct *); | 2210 | extern void __mmdrop(struct mm_struct *); |
2211 | static inline void mmdrop(struct mm_struct * mm) | 2211 | static inline void mmdrop(struct mm_struct * mm) |
2212 | { | 2212 | { |
2213 | if (unlikely(atomic_dec_and_test(&mm->mm_count))) | 2213 | if (unlikely(atomic_dec_and_test(&mm->mm_count))) |
2214 | __mmdrop(mm); | 2214 | __mmdrop(mm); |
2215 | } | 2215 | } |
2216 | 2216 | ||
2217 | /* mmput gets rid of the mappings and all user-space */ | 2217 | /* mmput gets rid of the mappings and all user-space */ |
2218 | extern void mmput(struct mm_struct *); | 2218 | extern void mmput(struct mm_struct *); |
2219 | /* Grab a reference to a task's mm, if it is not already going away */ | 2219 | /* Grab a reference to a task's mm, if it is not already going away */ |
2220 | extern struct mm_struct *get_task_mm(struct task_struct *task); | 2220 | extern struct mm_struct *get_task_mm(struct task_struct *task); |
2221 | /* Remove the current task's stale references to the old mm_struct */ | 2221 | /* Remove the current task's stale references to the old mm_struct */ |
2222 | extern void mm_release(struct task_struct *, struct mm_struct *); | 2222 | extern void mm_release(struct task_struct *, struct mm_struct *); |
2223 | /* Allocate a new mm structure and copy contents from tsk->mm */ | 2223 | /* Allocate a new mm structure and copy contents from tsk->mm */ |
2224 | extern struct mm_struct *dup_mm(struct task_struct *tsk); | 2224 | extern struct mm_struct *dup_mm(struct task_struct *tsk); |
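A hedged sketch of the canonical pattern for the two reference counts above: get_task_mm() pins another task's address space (mm_users), mmput() releases it, and only the final mm_count reference triggers mmdrop()/__mmdrop(). The helper name and the field read are illustrative, not from this header:

    static unsigned long read_total_vm(struct task_struct *task)
    {
            struct mm_struct *mm = get_task_mm(task); /* NULL if already exiting */
            unsigned long total = 0;

            if (mm) {
                    total = mm->total_vm;  /* safe: we hold an mm_users ref */
                    mmput(mm);             /* may tear down the mappings */
            }
            return total;
    }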
2225 | 2225 | ||
2226 | extern int copy_thread(unsigned long, unsigned long, unsigned long, | 2226 | extern int copy_thread(unsigned long, unsigned long, unsigned long, |
2227 | struct task_struct *, struct pt_regs *); | 2227 | struct task_struct *, struct pt_regs *); |
2228 | extern void flush_thread(void); | 2228 | extern void flush_thread(void); |
2229 | extern void exit_thread(void); | 2229 | extern void exit_thread(void); |
2230 | 2230 | ||
2231 | extern void exit_files(struct task_struct *); | 2231 | extern void exit_files(struct task_struct *); |
2232 | extern void __cleanup_sighand(struct sighand_struct *); | 2232 | extern void __cleanup_sighand(struct sighand_struct *); |
2233 | 2233 | ||
2234 | extern void exit_itimers(struct signal_struct *); | 2234 | extern void exit_itimers(struct signal_struct *); |
2235 | extern void flush_itimer_signals(void); | 2235 | extern void flush_itimer_signals(void); |
2236 | 2236 | ||
2237 | extern NORET_TYPE void do_group_exit(int); | 2237 | extern NORET_TYPE void do_group_exit(int); |
2238 | 2238 | ||
2239 | extern void daemonize(const char *, ...); | 2239 | extern void daemonize(const char *, ...); |
2240 | extern int allow_signal(int); | 2240 | extern int allow_signal(int); |
2241 | extern int disallow_signal(int); | 2241 | extern int disallow_signal(int); |
2242 | 2242 | ||
2243 | extern int do_execve(const char *, | 2243 | extern int do_execve(const char *, |
2244 | const char __user * const __user *, | 2244 | const char __user * const __user *, |
2245 | const char __user * const __user *, struct pt_regs *); | 2245 | const char __user * const __user *, struct pt_regs *); |
2246 | extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); | 2246 | extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); |
2247 | struct task_struct *fork_idle(int); | 2247 | struct task_struct *fork_idle(int); |
2248 | 2248 | ||
2249 | extern void set_task_comm(struct task_struct *tsk, char *from); | 2249 | extern void set_task_comm(struct task_struct *tsk, char *from); |
2250 | extern char *get_task_comm(char *to, struct task_struct *tsk); | 2250 | extern char *get_task_comm(char *to, struct task_struct *tsk); |
2251 | 2251 | ||
2252 | #ifdef CONFIG_SMP | 2252 | #ifdef CONFIG_SMP |
2253 | void scheduler_ipi(void); | 2253 | void scheduler_ipi(void); |
2254 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); | 2254 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); |
2255 | #else | 2255 | #else |
2256 | static inline void scheduler_ipi(void) { } | 2256 | static inline void scheduler_ipi(void) { } |
2257 | static inline unsigned long wait_task_inactive(struct task_struct *p, | 2257 | static inline unsigned long wait_task_inactive(struct task_struct *p, |
2258 | long match_state) | 2258 | long match_state) |
2259 | { | 2259 | { |
2260 | return 1; | 2260 | return 1; |
2261 | } | 2261 | } |
2262 | #endif | 2262 | #endif |
2263 | 2263 | ||
2264 | #define next_task(p) \ | 2264 | #define next_task(p) \ |
2265 | list_entry_rcu((p)->tasks.next, struct task_struct, tasks) | 2265 | list_entry_rcu((p)->tasks.next, struct task_struct, tasks) |
2266 | 2266 | ||
2267 | #define for_each_process(p) \ | 2267 | #define for_each_process(p) \ |
2268 | for (p = &init_task ; (p = next_task(p)) != &init_task ; ) | 2268 | for (p = &init_task ; (p = next_task(p)) != &init_task ; ) |
2269 | 2269 | ||
2270 | extern bool current_is_single_threaded(void); | 2270 | extern bool current_is_single_threaded(void); |
2271 | 2271 | ||
2272 | /* | 2272 | /* |
2273 | * Careful: do_each_thread/while_each_thread is a double loop so | 2273 | * Careful: do_each_thread/while_each_thread is a double loop so |
2274 | * 'break' will not work as expected - use goto instead. | 2274 | * 'break' will not work as expected - use goto instead. |
2275 | */ | 2275 | */ |
2276 | #define do_each_thread(g, t) \ | 2276 | #define do_each_thread(g, t) \ |
2277 | for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do | 2277 | for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do |
2278 | 2278 | ||
2279 | #define while_each_thread(g, t) \ | 2279 | #define while_each_thread(g, t) \ |
2280 | while ((t = next_thread(t)) != g) | 2280 | while ((t = next_thread(t)) != g) |
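A minimal sketch of the goto pattern the comment above calls for; target_pid is a hypothetical variable and the caller is assumed to hold tasklist_lock (or RCU):

    struct task_struct *g, *t;

    read_lock(&tasklist_lock);
    do_each_thread(g, t) {
            if (t->pid == target_pid)
                    goto found;     /* 'break' would only leave the inner while */
    } while_each_thread(g, t);
    t = NULL;
    found:
    read_unlock(&tasklist_lock);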
2281 | 2281 | ||
2282 | static inline int get_nr_threads(struct task_struct *tsk) | 2282 | static inline int get_nr_threads(struct task_struct *tsk) |
2283 | { | 2283 | { |
2284 | return tsk->signal->nr_threads; | 2284 | return tsk->signal->nr_threads; |
2285 | } | 2285 | } |
2286 | 2286 | ||
2287 | /* de_thread depends on thread_group_leader not being a pid based check */ | 2287 | /* de_thread depends on thread_group_leader not being a pid based check */ |
2288 | #define thread_group_leader(p) (p == p->group_leader) | 2288 | #define thread_group_leader(p) (p == p->group_leader) |
2289 | 2289 | ||
2290 | /* Due to the insanities of de_thread it is possible for a process | 2290 | /* Due to the insanities of de_thread it is possible for a process |
2291 | * to have the pid of the thread group leader without actually being | 2291 | * to have the pid of the thread group leader without actually being |
2292 | * the thread group leader. For iteration through the pids in proc | 2292 | * the thread group leader. For iteration through the pids in proc |
2293 | * all we care about is that we have a task with the appropriate | 2293 | * all we care about is that we have a task with the appropriate |
2294 | * pid; we don't actually care if we have the right task. | 2294 | * pid; we don't actually care if we have the right task. |
2295 | */ | 2295 | */ |
2296 | static inline int has_group_leader_pid(struct task_struct *p) | 2296 | static inline int has_group_leader_pid(struct task_struct *p) |
2297 | { | 2297 | { |
2298 | return p->pid == p->tgid; | 2298 | return p->pid == p->tgid; |
2299 | } | 2299 | } |
2300 | 2300 | ||
2301 | static inline | 2301 | static inline |
2302 | int same_thread_group(struct task_struct *p1, struct task_struct *p2) | 2302 | int same_thread_group(struct task_struct *p1, struct task_struct *p2) |
2303 | { | 2303 | { |
2304 | return p1->tgid == p2->tgid; | 2304 | return p1->tgid == p2->tgid; |
2305 | } | 2305 | } |
2306 | 2306 | ||
2307 | static inline struct task_struct *next_thread(const struct task_struct *p) | 2307 | static inline struct task_struct *next_thread(const struct task_struct *p) |
2308 | { | 2308 | { |
2309 | return list_entry_rcu(p->thread_group.next, | 2309 | return list_entry_rcu(p->thread_group.next, |
2310 | struct task_struct, thread_group); | 2310 | struct task_struct, thread_group); |
2311 | } | 2311 | } |
2312 | 2312 | ||
2313 | static inline int thread_group_empty(struct task_struct *p) | 2313 | static inline int thread_group_empty(struct task_struct *p) |
2314 | { | 2314 | { |
2315 | return list_empty(&p->thread_group); | 2315 | return list_empty(&p->thread_group); |
2316 | } | 2316 | } |
2317 | 2317 | ||
2318 | #define delay_group_leader(p) \ | 2318 | #define delay_group_leader(p) \ |
2319 | (thread_group_leader(p) && !thread_group_empty(p)) | 2319 | (thread_group_leader(p) && !thread_group_empty(p)) |
2320 | 2320 | ||
2321 | static inline int task_detached(struct task_struct *p) | 2321 | static inline int task_detached(struct task_struct *p) |
2322 | { | 2322 | { |
2323 | return p->exit_signal == -1; | 2323 | return p->exit_signal == -1; |
2324 | } | 2324 | } |
2325 | 2325 | ||
2326 | /* | 2326 | /* |
2327 | * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring | 2327 | * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring |
2328 | * subscriptions and synchronises with wait4(). Also used in procfs. Also | 2328 | * subscriptions and synchronises with wait4(). Also used in procfs. Also |
2329 | * pins the final release of task.io_context. Also protects ->cpuset and | 2329 | * pins the final release of task.io_context. Also protects ->cpuset and |
2330 | * ->cgroup.subsys[]. | 2330 | * ->cgroup.subsys[]. |
2331 | * | 2331 | * |
2332 | * Nests both inside and outside of read_lock(&tasklist_lock). | 2332 | * Nests both inside and outside of read_lock(&tasklist_lock). |
2333 | * It must not be nested with write_lock_irq(&tasklist_lock), | 2333 | * It must not be nested with write_lock_irq(&tasklist_lock), |
2334 | * neither inside nor outside. | 2334 | * neither inside nor outside. |
2335 | */ | 2335 | */ |
2336 | static inline void task_lock(struct task_struct *p) | 2336 | static inline void task_lock(struct task_struct *p) |
2337 | { | 2337 | { |
2338 | spin_lock(&p->alloc_lock); | 2338 | spin_lock(&p->alloc_lock); |
2339 | } | 2339 | } |
2340 | 2340 | ||
2341 | static inline void task_unlock(struct task_struct *p) | 2341 | static inline void task_unlock(struct task_struct *p) |
2342 | { | 2342 | { |
2343 | spin_unlock(&p->alloc_lock); | 2343 | spin_unlock(&p->alloc_lock); |
2344 | } | 2344 | } |
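A short sketch of task_lock() guarding one of the fields the comment lists, here ->comm; in-tree code would normally use get_task_comm(), declared earlier in this header, which wraps exactly this:

    char buf[TASK_COMM_LEN];

    task_lock(p);
    strncpy(buf, p->comm, sizeof(buf));
    buf[sizeof(buf) - 1] = '\0';        /* defensive termination */
    task_unlock(p);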
2345 | 2345 | ||
2346 | extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, | 2346 | extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, |
2347 | unsigned long *flags); | 2347 | unsigned long *flags); |
2348 | 2348 | ||
2349 | #define lock_task_sighand(tsk, flags) \ | 2349 | #define lock_task_sighand(tsk, flags) \ |
2350 | ({ struct sighand_struct *__ss; \ | 2350 | ({ struct sighand_struct *__ss; \ |
2351 | __cond_lock(&(tsk)->sighand->siglock, \ | 2351 | __cond_lock(&(tsk)->sighand->siglock, \ |
2352 | (__ss = __lock_task_sighand(tsk, flags))); \ | 2352 | (__ss = __lock_task_sighand(tsk, flags))); \ |
2353 | __ss; \ | 2353 | __ss; \ |
2354 | }) | 2354 | }) |
2355 | 2355 | ||
2356 | static inline void unlock_task_sighand(struct task_struct *tsk, | 2356 | static inline void unlock_task_sighand(struct task_struct *tsk, |
2357 | unsigned long *flags) | 2357 | unsigned long *flags) |
2358 | { | 2358 | { |
2359 | spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); | 2359 | spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); |
2360 | } | 2360 | } |
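Sketch of the intended pairing: __lock_task_sighand() returns NULL once the task's ->sighand has been torn down, so taking the lock is conditional:

    unsigned long flags;

    if (lock_task_sighand(tsk, &flags)) {
            /* ->sighand is pinned and ->siglock is held, IRQs disabled */
            unlock_task_sighand(tsk, &flags);
    }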
2361 | 2361 | ||
2362 | /* See the declaration of threadgroup_fork_lock in signal_struct. */ | 2362 | /* See the declaration of threadgroup_fork_lock in signal_struct. */ |
2363 | #ifdef CONFIG_CGROUPS | 2363 | #ifdef CONFIG_CGROUPS |
2364 | static inline void threadgroup_fork_read_lock(struct task_struct *tsk) | 2364 | static inline void threadgroup_fork_read_lock(struct task_struct *tsk) |
2365 | { | 2365 | { |
2366 | down_read(&tsk->signal->threadgroup_fork_lock); | 2366 | down_read(&tsk->signal->threadgroup_fork_lock); |
2367 | } | 2367 | } |
2368 | static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) | 2368 | static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) |
2369 | { | 2369 | { |
2370 | up_read(&tsk->signal->threadgroup_fork_lock); | 2370 | up_read(&tsk->signal->threadgroup_fork_lock); |
2371 | } | 2371 | } |
2372 | static inline void threadgroup_fork_write_lock(struct task_struct *tsk) | 2372 | static inline void threadgroup_fork_write_lock(struct task_struct *tsk) |
2373 | { | 2373 | { |
2374 | down_write(&tsk->signal->threadgroup_fork_lock); | 2374 | down_write(&tsk->signal->threadgroup_fork_lock); |
2375 | } | 2375 | } |
2376 | static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) | 2376 | static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) |
2377 | { | 2377 | { |
2378 | up_write(&tsk->signal->threadgroup_fork_lock); | 2378 | up_write(&tsk->signal->threadgroup_fork_lock); |
2379 | } | 2379 | } |
2380 | #else | 2380 | #else |
2381 | static inline void threadgroup_fork_read_lock(struct task_struct *tsk) {} | 2381 | static inline void threadgroup_fork_read_lock(struct task_struct *tsk) {} |
2382 | static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) {} | 2382 | static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) {} |
2383 | static inline void threadgroup_fork_write_lock(struct task_struct *tsk) {} | 2383 | static inline void threadgroup_fork_write_lock(struct task_struct *tsk) {} |
2384 | static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) {} | 2384 | static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) {} |
2385 | #endif | 2385 | #endif |
2386 | 2386 | ||
2387 | #ifndef __HAVE_THREAD_FUNCTIONS | 2387 | #ifndef __HAVE_THREAD_FUNCTIONS |
2388 | 2388 | ||
2389 | #define task_thread_info(task) ((struct thread_info *)(task)->stack) | 2389 | #define task_thread_info(task) ((struct thread_info *)(task)->stack) |
2390 | #define task_stack_page(task) ((task)->stack) | 2390 | #define task_stack_page(task) ((task)->stack) |
2391 | 2391 | ||
2392 | static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) | 2392 | static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) |
2393 | { | 2393 | { |
2394 | *task_thread_info(p) = *task_thread_info(org); | 2394 | *task_thread_info(p) = *task_thread_info(org); |
2395 | task_thread_info(p)->task = p; | 2395 | task_thread_info(p)->task = p; |
2396 | } | 2396 | } |
2397 | 2397 | ||
2398 | static inline unsigned long *end_of_stack(struct task_struct *p) | 2398 | static inline unsigned long *end_of_stack(struct task_struct *p) |
2399 | { | 2399 | { |
2400 | return (unsigned long *)(task_thread_info(p) + 1); | 2400 | return (unsigned long *)(task_thread_info(p) + 1); |
2401 | } | 2401 | } |
2402 | 2402 | ||
2403 | #endif | 2403 | #endif |
2404 | 2404 | ||
2405 | static inline int object_is_on_stack(void *obj) | 2405 | static inline int object_is_on_stack(void *obj) |
2406 | { | 2406 | { |
2407 | void *stack = task_stack_page(current); | 2407 | void *stack = task_stack_page(current); |
2408 | 2408 | ||
2409 | return (obj >= stack) && (obj < (stack + THREAD_SIZE)); | 2409 | return (obj >= stack) && (obj < (stack + THREAD_SIZE)); |
2410 | } | 2410 | } |
2411 | 2411 | ||
2412 | extern void thread_info_cache_init(void); | 2412 | extern void thread_info_cache_init(void); |
2413 | 2413 | ||
2414 | #ifdef CONFIG_DEBUG_STACK_USAGE | 2414 | #ifdef CONFIG_DEBUG_STACK_USAGE |
2415 | static inline unsigned long stack_not_used(struct task_struct *p) | 2415 | static inline unsigned long stack_not_used(struct task_struct *p) |
2416 | { | 2416 | { |
2417 | unsigned long *n = end_of_stack(p); | 2417 | unsigned long *n = end_of_stack(p); |
2418 | 2418 | ||
2419 | do { /* Skip over canary */ | 2419 | do { /* Skip over canary */ |
2420 | n++; | 2420 | n++; |
2421 | } while (!*n); | 2421 | } while (!*n); |
2422 | 2422 | ||
2423 | return (unsigned long)n - (unsigned long)end_of_stack(p); | 2423 | return (unsigned long)n - (unsigned long)end_of_stack(p); |
2424 | } | 2424 | } |
2425 | #endif | 2425 | #endif |
2426 | 2426 | ||
2427 | /* set thread flags in another task's structure | 2427 | /* set thread flags in another task's structure |
2428 | * - see asm/thread_info.h for TIF_xxxx flags available | 2428 | * - see asm/thread_info.h for TIF_xxxx flags available |
2429 | */ | 2429 | */ |
2430 | static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) | 2430 | static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) |
2431 | { | 2431 | { |
2432 | set_ti_thread_flag(task_thread_info(tsk), flag); | 2432 | set_ti_thread_flag(task_thread_info(tsk), flag); |
2433 | } | 2433 | } |
2434 | 2434 | ||
2435 | static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) | 2435 | static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) |
2436 | { | 2436 | { |
2437 | clear_ti_thread_flag(task_thread_info(tsk), flag); | 2437 | clear_ti_thread_flag(task_thread_info(tsk), flag); |
2438 | } | 2438 | } |
2439 | 2439 | ||
2440 | static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) | 2440 | static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) |
2441 | { | 2441 | { |
2442 | return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); | 2442 | return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); |
2443 | } | 2443 | } |
2444 | 2444 | ||
2445 | static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) | 2445 | static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) |
2446 | { | 2446 | { |
2447 | return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag); | 2447 | return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag); |
2448 | } | 2448 | } |
2449 | 2449 | ||
2450 | static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) | 2450 | static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) |
2451 | { | 2451 | { |
2452 | return test_ti_thread_flag(task_thread_info(tsk), flag); | 2452 | return test_ti_thread_flag(task_thread_info(tsk), flag); |
2453 | } | 2453 | } |
2454 | 2454 | ||
2455 | static inline void set_tsk_need_resched(struct task_struct *tsk) | 2455 | static inline void set_tsk_need_resched(struct task_struct *tsk) |
2456 | { | 2456 | { |
2457 | set_tsk_thread_flag(tsk,TIF_NEED_RESCHED); | 2457 | set_tsk_thread_flag(tsk,TIF_NEED_RESCHED); |
2458 | } | 2458 | } |
2459 | 2459 | ||
2460 | static inline void clear_tsk_need_resched(struct task_struct *tsk) | 2460 | static inline void clear_tsk_need_resched(struct task_struct *tsk) |
2461 | { | 2461 | { |
2462 | clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED); | 2462 | clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED); |
2463 | } | 2463 | } |
2464 | 2464 | ||
2465 | static inline int test_tsk_need_resched(struct task_struct *tsk) | 2465 | static inline int test_tsk_need_resched(struct task_struct *tsk) |
2466 | { | 2466 | { |
2467 | return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); | 2467 | return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); |
2468 | } | 2468 | } |
2469 | 2469 | ||
2470 | static inline int restart_syscall(void) | 2470 | static inline int restart_syscall(void) |
2471 | { | 2471 | { |
2472 | set_tsk_thread_flag(current, TIF_SIGPENDING); | 2472 | set_tsk_thread_flag(current, TIF_SIGPENDING); |
2473 | return -ERESTARTNOINTR; | 2473 | return -ERESTARTNOINTR; |
2474 | } | 2474 | } |
2475 | 2475 | ||
2476 | static inline int signal_pending(struct task_struct *p) | 2476 | static inline int signal_pending(struct task_struct *p) |
2477 | { | 2477 | { |
2478 | return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); | 2478 | return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); |
2479 | } | 2479 | } |
2480 | 2480 | ||
2481 | static inline int __fatal_signal_pending(struct task_struct *p) | 2481 | static inline int __fatal_signal_pending(struct task_struct *p) |
2482 | { | 2482 | { |
2483 | return unlikely(sigismember(&p->pending.signal, SIGKILL)); | 2483 | return unlikely(sigismember(&p->pending.signal, SIGKILL)); |
2484 | } | 2484 | } |
2485 | 2485 | ||
2486 | static inline int fatal_signal_pending(struct task_struct *p) | 2486 | static inline int fatal_signal_pending(struct task_struct *p) |
2487 | { | 2487 | { |
2488 | return signal_pending(p) && __fatal_signal_pending(p); | 2488 | return signal_pending(p) && __fatal_signal_pending(p); |
2489 | } | 2489 | } |
2490 | 2490 | ||
2491 | static inline int signal_pending_state(long state, struct task_struct *p) | 2491 | static inline int signal_pending_state(long state, struct task_struct *p) |
2492 | { | 2492 | { |
2493 | if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) | 2493 | if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) |
2494 | return 0; | 2494 | return 0; |
2495 | if (!signal_pending(p)) | 2495 | if (!signal_pending(p)) |
2496 | return 0; | 2496 | return 0; |
2497 | 2497 | ||
2498 | return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); | 2498 | return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); |
2499 | } | 2499 | } |
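signal_pending_state() is what the scheduler consults to decide whether a pending signal should abort a sleep: plain TASK_INTERRUPTIBLE sleeps break on any signal, TASK_WAKEKILL sleeps only on a fatal one. A hedged sketch of the TASK_KILLABLE wait loop shape this enables (done is a hypothetical condition):

    for (;;) {
            set_current_state(TASK_KILLABLE);
            if (done)
                    break;
            if (fatal_signal_pending(current)) {
                    __set_current_state(TASK_RUNNING);
                    return -ERESTARTSYS;
            }
            schedule();     /* only SIGKILL wakes this sleep early */
    }
    __set_current_state(TASK_RUNNING);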
2500 | 2500 | ||
2501 | static inline int need_resched(void) | 2501 | static inline int need_resched(void) |
2502 | { | 2502 | { |
2503 | return unlikely(test_thread_flag(TIF_NEED_RESCHED)); | 2503 | return unlikely(test_thread_flag(TIF_NEED_RESCHED)); |
2504 | } | 2504 | } |
2505 | 2505 | ||
2506 | /* | 2506 | /* |
2507 | * cond_resched() and cond_resched_lock(): latency reduction via | 2507 | * cond_resched() and cond_resched_lock(): latency reduction via |
2508 | * explicit rescheduling in places that are safe. The return | 2508 | * explicit rescheduling in places that are safe. The return |
2509 | * value indicates whether a reschedule was in fact done. | 2509 | * value indicates whether a reschedule was in fact done. |
2510 | * cond_resched_lock() will drop the spinlock before scheduling, | 2510 | * cond_resched_lock() will drop the spinlock before scheduling, |
2511 | * cond_resched_softirq() will enable bottom halves (BHs) before scheduling. | 2511 | * cond_resched_softirq() will enable bottom halves (BHs) before scheduling. |
2512 | */ | 2512 | */ |
2513 | extern int _cond_resched(void); | 2513 | extern int _cond_resched(void); |
2514 | 2514 | ||
2515 | #define cond_resched() ({ \ | 2515 | #define cond_resched() ({ \ |
2516 | __might_sleep(__FILE__, __LINE__, 0); \ | 2516 | __might_sleep(__FILE__, __LINE__, 0); \ |
2517 | _cond_resched(); \ | 2517 | _cond_resched(); \ |
2518 | }) | 2518 | }) |
2519 | 2519 | ||
2520 | extern int __cond_resched_lock(spinlock_t *lock); | 2520 | extern int __cond_resched_lock(spinlock_t *lock); |
2521 | 2521 | ||
2522 | #ifdef CONFIG_PREEMPT | 2522 | #ifdef CONFIG_PREEMPT |
2523 | #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET | 2523 | #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET |
2524 | #else | 2524 | #else |
2525 | #define PREEMPT_LOCK_OFFSET 0 | 2525 | #define PREEMPT_LOCK_OFFSET 0 |
2526 | #endif | 2526 | #endif |
2527 | 2527 | ||
2528 | #define cond_resched_lock(lock) ({ \ | 2528 | #define cond_resched_lock(lock) ({ \ |
2529 | __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ | 2529 | __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ |
2530 | __cond_resched_lock(lock); \ | 2530 | __cond_resched_lock(lock); \ |
2531 | }) | 2531 | }) |
2532 | 2532 | ||
2533 | extern int __cond_resched_softirq(void); | 2533 | extern int __cond_resched_softirq(void); |
2534 | 2534 | ||
2535 | #define cond_resched_softirq() ({ \ | 2535 | #define cond_resched_softirq() ({ \ |
2536 | __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ | 2536 | __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ |
2537 | __cond_resched_softirq(); \ | 2537 | __cond_resched_softirq(); \ |
2538 | }) | 2538 | }) |
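Typical use of cond_resched(): breaking up a long-running loop so the task yields at safe points even without CONFIG_PREEMPT (process_item() and nr_items are hypothetical):

    int i;

    for (i = 0; i < nr_items; i++) {
            process_item(i);
            cond_resched();   /* reschedules only if TIF_NEED_RESCHED is set */
    }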
2539 | 2539 | ||
2540 | /* | 2540 | /* |
2541 | * Does a critical section need to be broken because another | 2541 | * Does a critical section need to be broken because another |
2542 | * task is waiting? (This technically does not depend on CONFIG_PREEMPT, | 2542 | * task is waiting? (This technically does not depend on CONFIG_PREEMPT, |
2543 | * but reflects a general need for low latency.) | 2543 | * but reflects a general need for low latency.) |
2544 | */ | 2544 | */ |
2545 | static inline int spin_needbreak(spinlock_t *lock) | 2545 | static inline int spin_needbreak(spinlock_t *lock) |
2546 | { | 2546 | { |
2547 | #ifdef CONFIG_PREEMPT | 2547 | #ifdef CONFIG_PREEMPT |
2548 | return spin_is_contended(lock); | 2548 | return spin_is_contended(lock); |
2549 | #else | 2549 | #else |
2550 | return 0; | 2550 | return 0; |
2551 | #endif | 2551 | #endif |
2552 | } | 2552 | } |
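spin_needbreak() is the contention test behind cond_resched_lock() above; a hedged sketch of a long lock-holding loop that backs off at safe points (have_work()/do_one_unit() are hypothetical):

    spin_lock(&my_lock);
    while (have_work()) {
            do_one_unit();
            /* checks spin_needbreak()/need_resched() internally; drops the
             * lock, schedules and retakes it only when necessary */
            cond_resched_lock(&my_lock);
    }
    spin_unlock(&my_lock);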
2553 | 2553 | ||
2554 | /* | 2554 | /* |
2555 | * Thread group CPU time accounting. | 2555 | * Thread group CPU time accounting. |
2556 | */ | 2556 | */ |
2557 | void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); | 2557 | void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); |
2558 | void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); | 2558 | void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); |
2559 | 2559 | ||
2560 | static inline void thread_group_cputime_init(struct signal_struct *sig) | 2560 | static inline void thread_group_cputime_init(struct signal_struct *sig) |
2561 | { | 2561 | { |
2562 | spin_lock_init(&sig->cputimer.lock); | 2562 | spin_lock_init(&sig->cputimer.lock); |
2563 | } | 2563 | } |
2564 | 2564 | ||
2565 | /* | 2565 | /* |
2566 | * Reevaluate whether the task has signals pending delivery. | 2566 | * Reevaluate whether the task has signals pending delivery. |
2567 | * Wake the task if so. | 2567 | * Wake the task if so. |
2568 | * This is required every time the blocked sigset_t changes. | 2568 | * This is required every time the blocked sigset_t changes. |
2569 | * Callers must hold sighand->siglock. | 2569 | * Callers must hold sighand->siglock. |
2570 | */ | 2570 | */ |
2571 | extern void recalc_sigpending_and_wake(struct task_struct *t); | 2571 | extern void recalc_sigpending_and_wake(struct task_struct *t); |
2572 | extern void recalc_sigpending(void); | 2572 | extern void recalc_sigpending(void); |
2573 | 2573 | ||
2574 | extern void signal_wake_up(struct task_struct *t, int resume_stopped); | 2574 | extern void signal_wake_up(struct task_struct *t, int resume_stopped); |
2575 | 2575 | ||
2576 | /* | 2576 | /* |
2577 | * Wrappers for p->thread_info->cpu access. No-op on UP. | 2577 | * Wrappers for p->thread_info->cpu access. No-op on UP. |
2578 | */ | 2578 | */ |
2579 | #ifdef CONFIG_SMP | 2579 | #ifdef CONFIG_SMP |
2580 | 2580 | ||
2581 | static inline unsigned int task_cpu(const struct task_struct *p) | 2581 | static inline unsigned int task_cpu(const struct task_struct *p) |
2582 | { | 2582 | { |
2583 | return task_thread_info(p)->cpu; | 2583 | return task_thread_info(p)->cpu; |
2584 | } | 2584 | } |
2585 | 2585 | ||
2586 | extern void set_task_cpu(struct task_struct *p, unsigned int cpu); | 2586 | extern void set_task_cpu(struct task_struct *p, unsigned int cpu); |
2587 | 2587 | ||
2588 | #else | 2588 | #else |
2589 | 2589 | ||
2590 | static inline unsigned int task_cpu(const struct task_struct *p) | 2590 | static inline unsigned int task_cpu(const struct task_struct *p) |
2591 | { | 2591 | { |
2592 | return 0; | 2592 | return 0; |
2593 | } | 2593 | } |
2594 | 2594 | ||
2595 | static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) | 2595 | static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) |
2596 | { | 2596 | { |
2597 | } | 2597 | } |
2598 | 2598 | ||
2599 | #endif /* CONFIG_SMP */ | 2599 | #endif /* CONFIG_SMP */ |
2600 | 2600 | ||
2601 | extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); | 2601 | extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); |
2602 | extern long sched_getaffinity(pid_t pid, struct cpumask *mask); | 2602 | extern long sched_getaffinity(pid_t pid, struct cpumask *mask); |
2603 | 2603 | ||
2604 | extern void normalize_rt_tasks(void); | 2604 | extern void normalize_rt_tasks(void); |
2605 | 2605 | ||
2606 | #ifdef CONFIG_CGROUP_SCHED | 2606 | #ifdef CONFIG_CGROUP_SCHED |
2607 | 2607 | ||
2608 | extern struct task_group root_task_group; | 2608 | extern struct task_group root_task_group; |
2609 | 2609 | ||
2610 | extern struct task_group *sched_create_group(struct task_group *parent); | 2610 | extern struct task_group *sched_create_group(struct task_group *parent); |
2611 | extern void sched_destroy_group(struct task_group *tg); | 2611 | extern void sched_destroy_group(struct task_group *tg); |
2612 | extern void sched_move_task(struct task_struct *tsk); | 2612 | extern void sched_move_task(struct task_struct *tsk); |
2613 | #ifdef CONFIG_FAIR_GROUP_SCHED | 2613 | #ifdef CONFIG_FAIR_GROUP_SCHED |
2614 | extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); | 2614 | extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); |
2615 | extern unsigned long sched_group_shares(struct task_group *tg); | 2615 | extern unsigned long sched_group_shares(struct task_group *tg); |
2616 | #endif | 2616 | #endif |
2617 | #ifdef CONFIG_RT_GROUP_SCHED | 2617 | #ifdef CONFIG_RT_GROUP_SCHED |
2618 | extern int sched_group_set_rt_runtime(struct task_group *tg, | 2618 | extern int sched_group_set_rt_runtime(struct task_group *tg, |
2619 | long rt_runtime_us); | 2619 | long rt_runtime_us); |
2620 | extern long sched_group_rt_runtime(struct task_group *tg); | 2620 | extern long sched_group_rt_runtime(struct task_group *tg); |
2621 | extern int sched_group_set_rt_period(struct task_group *tg, | 2621 | extern int sched_group_set_rt_period(struct task_group *tg, |
2622 | long rt_period_us); | 2622 | long rt_period_us); |
2623 | extern long sched_group_rt_period(struct task_group *tg); | 2623 | extern long sched_group_rt_period(struct task_group *tg); |
2624 | extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); | 2624 | extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); |
2625 | #endif | 2625 | #endif |
2626 | #endif | 2626 | #endif |
2627 | 2627 | ||
2628 | extern int task_can_switch_user(struct user_struct *up, | 2628 | extern int task_can_switch_user(struct user_struct *up, |
2629 | struct task_struct *tsk); | 2629 | struct task_struct *tsk); |
2630 | 2630 | ||
2631 | #ifdef CONFIG_TASK_XACCT | 2631 | #ifdef CONFIG_TASK_XACCT |
2632 | static inline void add_rchar(struct task_struct *tsk, ssize_t amt) | 2632 | static inline void add_rchar(struct task_struct *tsk, ssize_t amt) |
2633 | { | 2633 | { |
2634 | tsk->ioac.rchar += amt; | 2634 | tsk->ioac.rchar += amt; |
2635 | } | 2635 | } |
2636 | 2636 | ||
2637 | static inline void add_wchar(struct task_struct *tsk, ssize_t amt) | 2637 | static inline void add_wchar(struct task_struct *tsk, ssize_t amt) |
2638 | { | 2638 | { |
2639 | tsk->ioac.wchar += amt; | 2639 | tsk->ioac.wchar += amt; |
2640 | } | 2640 | } |
2641 | 2641 | ||
2642 | static inline void inc_syscr(struct task_struct *tsk) | 2642 | static inline void inc_syscr(struct task_struct *tsk) |
2643 | { | 2643 | { |
2644 | tsk->ioac.syscr++; | 2644 | tsk->ioac.syscr++; |
2645 | } | 2645 | } |
2646 | 2646 | ||
2647 | static inline void inc_syscw(struct task_struct *tsk) | 2647 | static inline void inc_syscw(struct task_struct *tsk) |
2648 | { | 2648 | { |
2649 | tsk->ioac.syscw++; | 2649 | tsk->ioac.syscw++; |
2650 | } | 2650 | } |
2651 | #else | 2651 | #else |
2652 | static inline void add_rchar(struct task_struct *tsk, ssize_t amt) | 2652 | static inline void add_rchar(struct task_struct *tsk, ssize_t amt) |
2653 | { | 2653 | { |
2654 | } | 2654 | } |
2655 | 2655 | ||
2656 | static inline void add_wchar(struct task_struct *tsk, ssize_t amt) | 2656 | static inline void add_wchar(struct task_struct *tsk, ssize_t amt) |
2657 | { | 2657 | { |
2658 | } | 2658 | } |
2659 | 2659 | ||
2660 | static inline void inc_syscr(struct task_struct *tsk) | 2660 | static inline void inc_syscr(struct task_struct *tsk) |
2661 | { | 2661 | { |
2662 | } | 2662 | } |
2663 | 2663 | ||
2664 | static inline void inc_syscw(struct task_struct *tsk) | 2664 | static inline void inc_syscw(struct task_struct *tsk) |
2665 | { | 2665 | { |
2666 | } | 2666 | } |
2667 | #endif | 2667 | #endif |
2668 | 2668 | ||
2669 | #ifndef TASK_SIZE_OF | 2669 | #ifndef TASK_SIZE_OF |
2670 | #define TASK_SIZE_OF(tsk) TASK_SIZE | 2670 | #define TASK_SIZE_OF(tsk) TASK_SIZE |
2671 | #endif | 2671 | #endif |
2672 | 2672 | ||
2673 | #ifdef CONFIG_MM_OWNER | 2673 | #ifdef CONFIG_MM_OWNER |
2674 | extern void mm_update_next_owner(struct mm_struct *mm); | 2674 | extern void mm_update_next_owner(struct mm_struct *mm); |
2675 | extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); | 2675 | extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); |
2676 | #else | 2676 | #else |
2677 | static inline void mm_update_next_owner(struct mm_struct *mm) | 2677 | static inline void mm_update_next_owner(struct mm_struct *mm) |
2678 | { | 2678 | { |
2679 | } | 2679 | } |
2680 | 2680 | ||
2681 | static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p) | 2681 | static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p) |
2682 | { | 2682 | { |
2683 | } | 2683 | } |
2684 | #endif /* CONFIG_MM_OWNER */ | 2684 | #endif /* CONFIG_MM_OWNER */ |
2685 | 2685 | ||
2686 | static inline unsigned long task_rlimit(const struct task_struct *tsk, | 2686 | static inline unsigned long task_rlimit(const struct task_struct *tsk, |
2687 | unsigned int limit) | 2687 | unsigned int limit) |
2688 | { | 2688 | { |
2689 | return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur); | 2689 | return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur); |
2690 | } | 2690 | } |
2691 | 2691 | ||
2692 | static inline unsigned long task_rlimit_max(const struct task_struct *tsk, | 2692 | static inline unsigned long task_rlimit_max(const struct task_struct *tsk, |
2693 | unsigned int limit) | 2693 | unsigned int limit) |
2694 | { | 2694 | { |
2695 | return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max); | 2695 | return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max); |
2696 | } | 2696 | } |
2697 | 2697 | ||
2698 | static inline unsigned long rlimit(unsigned int limit) | 2698 | static inline unsigned long rlimit(unsigned int limit) |
2699 | { | 2699 | { |
2700 | return task_rlimit(current, limit); | 2700 | return task_rlimit(current, limit); |
2701 | } | 2701 | } |
2702 | 2702 | ||
2703 | static inline unsigned long rlimit_max(unsigned int limit) | 2703 | static inline unsigned long rlimit_max(unsigned int limit) |
2704 | { | 2704 | { |
2705 | return task_rlimit_max(current, limit); | 2705 | return task_rlimit_max(current, limit); |
2706 | } | 2706 | } |
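Usage sketch for the wrappers above; the surrounding check and counter are illustrative only:

    unsigned long soft = rlimit(RLIMIT_NOFILE);     /* current's soft limit */
    unsigned long hard = rlimit_max(RLIMIT_NOFILE); /* hard limit, for symmetry */

    if (nr_in_use >= soft)          /* nr_in_use: hypothetical counter */
            return -EMFILE;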
2707 | 2707 | ||
2708 | #endif /* __KERNEL__ */ | 2708 | #endif /* __KERNEL__ */ |
2709 | 2709 | ||
2710 | #endif | 2710 | #endif |
2711 | 2711 |
kernel/exit.c
1 | /* | 1 | /* |
2 | * linux/kernel/exit.c | 2 | * linux/kernel/exit.c |
3 | * | 3 | * |
4 | * Copyright (C) 1991, 1992 Linus Torvalds | 4 | * Copyright (C) 1991, 1992 Linus Torvalds |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/mm.h> | 7 | #include <linux/mm.h> |
8 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
9 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/capability.h> | 11 | #include <linux/capability.h> |
12 | #include <linux/completion.h> | 12 | #include <linux/completion.h> |
13 | #include <linux/personality.h> | 13 | #include <linux/personality.h> |
14 | #include <linux/tty.h> | 14 | #include <linux/tty.h> |
15 | #include <linux/iocontext.h> | 15 | #include <linux/iocontext.h> |
16 | #include <linux/key.h> | 16 | #include <linux/key.h> |
17 | #include <linux/security.h> | 17 | #include <linux/security.h> |
18 | #include <linux/cpu.h> | 18 | #include <linux/cpu.h> |
19 | #include <linux/acct.h> | 19 | #include <linux/acct.h> |
20 | #include <linux/tsacct_kern.h> | 20 | #include <linux/tsacct_kern.h> |
21 | #include <linux/file.h> | 21 | #include <linux/file.h> |
22 | #include <linux/fdtable.h> | 22 | #include <linux/fdtable.h> |
23 | #include <linux/binfmts.h> | 23 | #include <linux/binfmts.h> |
24 | #include <linux/nsproxy.h> | 24 | #include <linux/nsproxy.h> |
25 | #include <linux/pid_namespace.h> | 25 | #include <linux/pid_namespace.h> |
26 | #include <linux/ptrace.h> | 26 | #include <linux/ptrace.h> |
27 | #include <linux/profile.h> | 27 | #include <linux/profile.h> |
28 | #include <linux/mount.h> | 28 | #include <linux/mount.h> |
29 | #include <linux/proc_fs.h> | 29 | #include <linux/proc_fs.h> |
30 | #include <linux/kthread.h> | 30 | #include <linux/kthread.h> |
31 | #include <linux/mempolicy.h> | 31 | #include <linux/mempolicy.h> |
32 | #include <linux/taskstats_kern.h> | 32 | #include <linux/taskstats_kern.h> |
33 | #include <linux/delayacct.h> | 33 | #include <linux/delayacct.h> |
34 | #include <linux/freezer.h> | 34 | #include <linux/freezer.h> |
35 | #include <linux/cgroup.h> | 35 | #include <linux/cgroup.h> |
36 | #include <linux/syscalls.h> | 36 | #include <linux/syscalls.h> |
37 | #include <linux/signal.h> | 37 | #include <linux/signal.h> |
38 | #include <linux/posix-timers.h> | 38 | #include <linux/posix-timers.h> |
39 | #include <linux/cn_proc.h> | 39 | #include <linux/cn_proc.h> |
40 | #include <linux/mutex.h> | 40 | #include <linux/mutex.h> |
41 | #include <linux/futex.h> | 41 | #include <linux/futex.h> |
42 | #include <linux/pipe_fs_i.h> | 42 | #include <linux/pipe_fs_i.h> |
43 | #include <linux/audit.h> /* for audit_free() */ | 43 | #include <linux/audit.h> /* for audit_free() */ |
44 | #include <linux/resource.h> | 44 | #include <linux/resource.h> |
45 | #include <linux/blkdev.h> | 45 | #include <linux/blkdev.h> |
46 | #include <linux/task_io_accounting_ops.h> | 46 | #include <linux/task_io_accounting_ops.h> |
47 | #include <linux/tracehook.h> | 47 | #include <linux/tracehook.h> |
48 | #include <linux/fs_struct.h> | 48 | #include <linux/fs_struct.h> |
49 | #include <linux/init_task.h> | 49 | #include <linux/init_task.h> |
50 | #include <linux/perf_event.h> | 50 | #include <linux/perf_event.h> |
51 | #include <trace/events/sched.h> | 51 | #include <trace/events/sched.h> |
52 | #include <linux/hw_breakpoint.h> | 52 | #include <linux/hw_breakpoint.h> |
53 | #include <linux/oom.h> | 53 | #include <linux/oom.h> |
54 | 54 | ||
55 | #include <asm/uaccess.h> | 55 | #include <asm/uaccess.h> |
56 | #include <asm/unistd.h> | 56 | #include <asm/unistd.h> |
57 | #include <asm/pgtable.h> | 57 | #include <asm/pgtable.h> |
58 | #include <asm/mmu_context.h> | 58 | #include <asm/mmu_context.h> |
59 | 59 | ||
60 | static void exit_mm(struct task_struct * tsk); | 60 | static void exit_mm(struct task_struct * tsk); |
61 | 61 | ||
62 | static void __unhash_process(struct task_struct *p, bool group_dead) | 62 | static void __unhash_process(struct task_struct *p, bool group_dead) |
63 | { | 63 | { |
64 | nr_threads--; | 64 | nr_threads--; |
65 | detach_pid(p, PIDTYPE_PID); | 65 | detach_pid(p, PIDTYPE_PID); |
66 | if (group_dead) { | 66 | if (group_dead) { |
67 | detach_pid(p, PIDTYPE_PGID); | 67 | detach_pid(p, PIDTYPE_PGID); |
68 | detach_pid(p, PIDTYPE_SID); | 68 | detach_pid(p, PIDTYPE_SID); |
69 | 69 | ||
70 | list_del_rcu(&p->tasks); | 70 | list_del_rcu(&p->tasks); |
71 | list_del_init(&p->sibling); | 71 | list_del_init(&p->sibling); |
72 | __this_cpu_dec(process_counts); | 72 | __this_cpu_dec(process_counts); |
73 | } | 73 | } |
74 | list_del_rcu(&p->thread_group); | 74 | list_del_rcu(&p->thread_group); |
75 | } | 75 | } |
76 | 76 | ||
77 | /* | 77 | /* |
78 | * This function expects the tasklist_lock write-locked. | 78 | * This function expects the tasklist_lock write-locked. |
79 | */ | 79 | */ |
80 | static void __exit_signal(struct task_struct *tsk) | 80 | static void __exit_signal(struct task_struct *tsk) |
81 | { | 81 | { |
82 | struct signal_struct *sig = tsk->signal; | 82 | struct signal_struct *sig = tsk->signal; |
83 | bool group_dead = thread_group_leader(tsk); | 83 | bool group_dead = thread_group_leader(tsk); |
84 | struct sighand_struct *sighand; | 84 | struct sighand_struct *sighand; |
85 | struct tty_struct *uninitialized_var(tty); | 85 | struct tty_struct *uninitialized_var(tty); |
86 | 86 | ||
87 | sighand = rcu_dereference_check(tsk->sighand, | 87 | sighand = rcu_dereference_check(tsk->sighand, |
88 | rcu_read_lock_held() || | 88 | rcu_read_lock_held() || |
89 | lockdep_tasklist_lock_is_held()); | 89 | lockdep_tasklist_lock_is_held()); |
90 | spin_lock(&sighand->siglock); | 90 | spin_lock(&sighand->siglock); |
91 | 91 | ||
92 | posix_cpu_timers_exit(tsk); | 92 | posix_cpu_timers_exit(tsk); |
93 | if (group_dead) { | 93 | if (group_dead) { |
94 | posix_cpu_timers_exit_group(tsk); | 94 | posix_cpu_timers_exit_group(tsk); |
95 | tty = sig->tty; | 95 | tty = sig->tty; |
96 | sig->tty = NULL; | 96 | sig->tty = NULL; |
97 | } else { | 97 | } else { |
98 | /* | 98 | /* |
99 | * This can only happen if the caller is de_thread(). | 99 | * This can only happen if the caller is de_thread(). |
100 | * FIXME: this is the temporary hack, we should teach | 100 | * FIXME: this is the temporary hack, we should teach |
101 | * posix-cpu-timers to handle this case correctly. | 101 | * posix-cpu-timers to handle this case correctly. |
102 | */ | 102 | */ |
103 | if (unlikely(has_group_leader_pid(tsk))) | 103 | if (unlikely(has_group_leader_pid(tsk))) |
104 | posix_cpu_timers_exit_group(tsk); | 104 | posix_cpu_timers_exit_group(tsk); |
105 | 105 | ||
106 | /* | 106 | /* |
107 | * If there is any task waiting for the group exit | 107 | * If there is any task waiting for the group exit |
108 | * then notify it: | 108 | * then notify it: |
109 | */ | 109 | */ |
110 | if (sig->notify_count > 0 && !--sig->notify_count) | 110 | if (sig->notify_count > 0 && !--sig->notify_count) |
111 | wake_up_process(sig->group_exit_task); | 111 | wake_up_process(sig->group_exit_task); |
112 | 112 | ||
113 | if (tsk == sig->curr_target) | 113 | if (tsk == sig->curr_target) |
114 | sig->curr_target = next_thread(tsk); | 114 | sig->curr_target = next_thread(tsk); |
115 | /* | 115 | /* |
116 | * Accumulate here the counters for all threads but the | 116 | * Accumulate here the counters for all threads but the |
117 | * group leader as they die, so they can be added into | 117 | * group leader as they die, so they can be added into |
118 | * the process-wide totals when those are taken. | 118 | * the process-wide totals when those are taken. |
119 | * The group leader stays around as a zombie as long | 119 | * The group leader stays around as a zombie as long |
120 | * as there are other threads. When it gets reaped, | 120 | * as there are other threads. When it gets reaped, |
121 | * the exit.c code will add its counts into these totals. | 121 | * the exit.c code will add its counts into these totals. |
122 | * We won't ever get here for the group leader, since it | 122 | * We won't ever get here for the group leader, since it |
123 | * will have been the last reference on the signal_struct. | 123 | * will have been the last reference on the signal_struct. |
124 | */ | 124 | */ |
125 | sig->utime = cputime_add(sig->utime, tsk->utime); | 125 | sig->utime = cputime_add(sig->utime, tsk->utime); |
126 | sig->stime = cputime_add(sig->stime, tsk->stime); | 126 | sig->stime = cputime_add(sig->stime, tsk->stime); |
127 | sig->gtime = cputime_add(sig->gtime, tsk->gtime); | 127 | sig->gtime = cputime_add(sig->gtime, tsk->gtime); |
128 | sig->min_flt += tsk->min_flt; | 128 | sig->min_flt += tsk->min_flt; |
129 | sig->maj_flt += tsk->maj_flt; | 129 | sig->maj_flt += tsk->maj_flt; |
130 | sig->nvcsw += tsk->nvcsw; | 130 | sig->nvcsw += tsk->nvcsw; |
131 | sig->nivcsw += tsk->nivcsw; | 131 | sig->nivcsw += tsk->nivcsw; |
132 | sig->inblock += task_io_get_inblock(tsk); | 132 | sig->inblock += task_io_get_inblock(tsk); |
133 | sig->oublock += task_io_get_oublock(tsk); | 133 | sig->oublock += task_io_get_oublock(tsk); |
134 | task_io_accounting_add(&sig->ioac, &tsk->ioac); | 134 | task_io_accounting_add(&sig->ioac, &tsk->ioac); |
135 | sig->sum_sched_runtime += tsk->se.sum_exec_runtime; | 135 | sig->sum_sched_runtime += tsk->se.sum_exec_runtime; |
136 | } | 136 | } |
137 | 137 | ||
138 | sig->nr_threads--; | 138 | sig->nr_threads--; |
139 | __unhash_process(tsk, group_dead); | 139 | __unhash_process(tsk, group_dead); |
140 | 140 | ||
141 | /* | 141 | /* |
142 | * Do this under ->siglock, we can race with another thread | 142 | * Do this under ->siglock, we can race with another thread |
143 | * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. | 143 | * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. |
144 | */ | 144 | */ |
145 | flush_sigqueue(&tsk->pending); | 145 | flush_sigqueue(&tsk->pending); |
146 | tsk->sighand = NULL; | 146 | tsk->sighand = NULL; |
147 | spin_unlock(&sighand->siglock); | 147 | spin_unlock(&sighand->siglock); |
148 | 148 | ||
149 | __cleanup_sighand(sighand); | 149 | __cleanup_sighand(sighand); |
150 | clear_tsk_thread_flag(tsk,TIF_SIGPENDING); | 150 | clear_tsk_thread_flag(tsk,TIF_SIGPENDING); |
151 | if (group_dead) { | 151 | if (group_dead) { |
152 | flush_sigqueue(&sig->shared_pending); | 152 | flush_sigqueue(&sig->shared_pending); |
153 | tty_kref_put(tty); | 153 | tty_kref_put(tty); |
154 | } | 154 | } |
155 | } | 155 | } |
156 | 156 | ||
157 | static void delayed_put_task_struct(struct rcu_head *rhp) | 157 | static void delayed_put_task_struct(struct rcu_head *rhp) |
158 | { | 158 | { |
159 | struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); | 159 | struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); |
160 | 160 | ||
161 | perf_event_delayed_put(tsk); | 161 | perf_event_delayed_put(tsk); |
162 | trace_sched_process_free(tsk); | 162 | trace_sched_process_free(tsk); |
163 | put_task_struct(tsk); | 163 | put_task_struct(tsk); |
164 | } | 164 | } |
165 | 165 | ||
166 | 166 | ||
167 | void release_task(struct task_struct * p) | 167 | void release_task(struct task_struct * p) |
168 | { | 168 | { |
169 | struct task_struct *leader; | 169 | struct task_struct *leader; |
170 | int zap_leader; | 170 | int zap_leader; |
171 | repeat: | 171 | repeat: |
172 | /* don't need to get the RCU readlock here - the process is dead and | 172 | /* don't need to get the RCU readlock here - the process is dead and |
173 | * can't be modifying its own credentials. But shut RCU-lockdep up */ | 173 | * can't be modifying its own credentials. But shut RCU-lockdep up */ |
174 | rcu_read_lock(); | 174 | rcu_read_lock(); |
175 | atomic_dec(&__task_cred(p)->user->processes); | 175 | atomic_dec(&__task_cred(p)->user->processes); |
176 | rcu_read_unlock(); | 176 | rcu_read_unlock(); |
177 | 177 | ||
178 | proc_flush_task(p); | 178 | proc_flush_task(p); |
179 | 179 | ||
180 | write_lock_irq(&tasklist_lock); | 180 | write_lock_irq(&tasklist_lock); |
181 | ptrace_release_task(p); | 181 | ptrace_release_task(p); |
182 | __exit_signal(p); | 182 | __exit_signal(p); |
183 | 183 | ||
184 | /* | 184 | /* |
185 | * If we are the last non-leader member of the thread | 185 | * If we are the last non-leader member of the thread |
186 | * group, and the leader is zombie, then notify the | 186 | * group, and the leader is zombie, then notify the |
187 | * group leader's parent process. (if it wants notification.) | 187 | * group leader's parent process. (if it wants notification.) |
188 | */ | 188 | */ |
189 | zap_leader = 0; | 189 | zap_leader = 0; |
190 | leader = p->group_leader; | 190 | leader = p->group_leader; |
191 | if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) { | 191 | if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) { |
192 | BUG_ON(task_detached(leader)); | 192 | BUG_ON(task_detached(leader)); |
193 | do_notify_parent(leader, leader->exit_signal); | 193 | do_notify_parent(leader, leader->exit_signal); |
194 | /* | 194 | /* |
195 | * If we were the last child thread and the leader has | 195 | * If we were the last child thread and the leader has |
196 | * exited already, and the leader's parent ignores SIGCHLD, | 196 | * exited already, and the leader's parent ignores SIGCHLD, |
197 | * then we are the one who should release the leader. | 197 | * then we are the one who should release the leader. |
198 | * | 198 | * |
199 | * do_notify_parent() will have marked it self-reaping in | 199 | * do_notify_parent() will have marked it self-reaping in |
200 | * that case. | 200 | * that case. |
201 | */ | 201 | */ |
202 | zap_leader = task_detached(leader); | 202 | zap_leader = task_detached(leader); |
203 | 203 | ||
204 | /* | 204 | /* |
205 | * This maintains the invariant that release_task() | 205 | * This maintains the invariant that release_task() |
206 | * only runs on a task in EXIT_DEAD, just for sanity. | 206 | * only runs on a task in EXIT_DEAD, just for sanity. |
207 | */ | 207 | */ |
208 | if (zap_leader) | 208 | if (zap_leader) |
209 | leader->exit_state = EXIT_DEAD; | 209 | leader->exit_state = EXIT_DEAD; |
210 | } | 210 | } |
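This block is what the commit message is aiming at: do_notify_parent() now returns true exactly when the parent ignores SIGCHLD and the zombie should self-reap. A hedged sketch (not the committed code) of how a follow-up cleanup could let release_task() consume that return value directly instead of re-testing task_detached():

    zap_leader = do_notify_parent(leader, leader->exit_signal);
    if (zap_leader)
            leader->exit_state = EXIT_DEAD;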
211 | 211 | ||
212 | write_unlock_irq(&tasklist_lock); | 212 | write_unlock_irq(&tasklist_lock); |
213 | release_thread(p); | 213 | release_thread(p); |
214 | call_rcu(&p->rcu, delayed_put_task_struct); | 214 | call_rcu(&p->rcu, delayed_put_task_struct); |
215 | 215 | ||
216 | p = leader; | 216 | p = leader; |
217 | if (unlikely(zap_leader)) | 217 | if (unlikely(zap_leader)) |
218 | goto repeat; | 218 | goto repeat; |
219 | } | 219 | } |
220 | 220 | ||
221 | /* | 221 | /* |
222 | * This checks not only the pgrp, but falls back on the pid if no | 222 | * This checks not only the pgrp, but falls back on the pid if no |
223 | * satisfactory pgrp is found. I dunno - gdb doesn't work correctly | 223 | * satisfactory pgrp is found. I dunno - gdb doesn't work correctly |
224 | * without this... | 224 | * without this... |
225 | * | 225 | * |
226 | * The caller must hold rcu lock or the tasklist lock. | 226 | * The caller must hold rcu lock or the tasklist lock. |
227 | */ | 227 | */ |
228 | struct pid *session_of_pgrp(struct pid *pgrp) | 228 | struct pid *session_of_pgrp(struct pid *pgrp) |
229 | { | 229 | { |
230 | struct task_struct *p; | 230 | struct task_struct *p; |
231 | struct pid *sid = NULL; | 231 | struct pid *sid = NULL; |
232 | 232 | ||
233 | p = pid_task(pgrp, PIDTYPE_PGID); | 233 | p = pid_task(pgrp, PIDTYPE_PGID); |
234 | if (p == NULL) | 234 | if (p == NULL) |
235 | p = pid_task(pgrp, PIDTYPE_PID); | 235 | p = pid_task(pgrp, PIDTYPE_PID); |
236 | if (p != NULL) | 236 | if (p != NULL) |
237 | sid = task_session(p); | 237 | sid = task_session(p); |
238 | 238 | ||
239 | return sid; | 239 | return sid; |
240 | } | 240 | } |
241 | 241 | ||
242 | /* | 242 | /* |
243 | * Determine if a process group is "orphaned", according to the POSIX | 243 | * Determine if a process group is "orphaned", according to the POSIX |
244 | * definition in 2.2.2.52. Orphaned process groups are not to be affected | 244 | * definition in 2.2.2.52. Orphaned process groups are not to be affected |
245 | * by terminal-generated stop signals. Newly orphaned process groups are | 245 | * by terminal-generated stop signals. Newly orphaned process groups are |
246 | * to receive a SIGHUP and a SIGCONT. | 246 | * to receive a SIGHUP and a SIGCONT. |
247 | * | 247 | * |
248 | * "I ask you, have you ever known what it is to be an orphan?" | 248 | * "I ask you, have you ever known what it is to be an orphan?" |
249 | */ | 249 | */ |
250 | static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task) | 250 | static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task) |
251 | { | 251 | { |
252 | struct task_struct *p; | 252 | struct task_struct *p; |
253 | 253 | ||
254 | do_each_pid_task(pgrp, PIDTYPE_PGID, p) { | 254 | do_each_pid_task(pgrp, PIDTYPE_PGID, p) { |
255 | if ((p == ignored_task) || | 255 | if ((p == ignored_task) || |
256 | (p->exit_state && thread_group_empty(p)) || | 256 | (p->exit_state && thread_group_empty(p)) || |
257 | is_global_init(p->real_parent)) | 257 | is_global_init(p->real_parent)) |
258 | continue; | 258 | continue; |
259 | 259 | ||
260 | if (task_pgrp(p->real_parent) != pgrp && | 260 | if (task_pgrp(p->real_parent) != pgrp && |
261 | task_session(p->real_parent) == task_session(p)) | 261 | task_session(p->real_parent) == task_session(p)) |
262 | return 0; | 262 | return 0; |
263 | } while_each_pid_task(pgrp, PIDTYPE_PGID, p); | 263 | } while_each_pid_task(pgrp, PIDTYPE_PGID, p); |
264 | 264 | ||
265 | return 1; | 265 | return 1; |
266 | } | 266 | } |
267 | 267 | ||
268 | int is_current_pgrp_orphaned(void) | 268 | int is_current_pgrp_orphaned(void) |
269 | { | 269 | { |
270 | int retval; | 270 | int retval; |
271 | 271 | ||
272 | read_lock(&tasklist_lock); | 272 | read_lock(&tasklist_lock); |
273 | retval = will_become_orphaned_pgrp(task_pgrp(current), NULL); | 273 | retval = will_become_orphaned_pgrp(task_pgrp(current), NULL); |
274 | read_unlock(&tasklist_lock); | 274 | read_unlock(&tasklist_lock); |
275 | 275 | ||
276 | return retval; | 276 | return retval; |
277 | } | 277 | } |
278 | 278 | ||
279 | static int has_stopped_jobs(struct pid *pgrp) | 279 | static int has_stopped_jobs(struct pid *pgrp) |
280 | { | 280 | { |
281 | int retval = 0; | 281 | int retval = 0; |
282 | struct task_struct *p; | 282 | struct task_struct *p; |
283 | 283 | ||
284 | do_each_pid_task(pgrp, PIDTYPE_PGID, p) { | 284 | do_each_pid_task(pgrp, PIDTYPE_PGID, p) { |
285 | if (!task_is_stopped(p)) | 285 | if (!task_is_stopped(p)) |
286 | continue; | 286 | continue; |
287 | retval = 1; | 287 | retval = 1; |
288 | break; | 288 | break; |
289 | } while_each_pid_task(pgrp, PIDTYPE_PGID, p); | 289 | } while_each_pid_task(pgrp, PIDTYPE_PGID, p); |
290 | return retval; | 290 | return retval; |
291 | } | 291 | } |
292 | 292 | ||
293 | /* | 293 | /* |
294 | * Check to see if any process groups have become orphaned as | 294 | * Check to see if any process groups have become orphaned as |
295 | * a result of our exiting, and if they have any stopped jobs, | 295 | * a result of our exiting, and if they have any stopped jobs, |
296 | * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) | 296 | * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) |
297 | */ | 297 | */ |
298 | static void | 298 | static void |
299 | kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent) | 299 | kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent) |
300 | { | 300 | { |
301 | struct pid *pgrp = task_pgrp(tsk); | 301 | struct pid *pgrp = task_pgrp(tsk); |
302 | struct task_struct *ignored_task = tsk; | 302 | struct task_struct *ignored_task = tsk; |
303 | 303 | ||
304 | if (!parent) | 304 | if (!parent) |
305 | /* exit: our father is in a different pgrp than | 305 | /* exit: our father is in a different pgrp than |
306 | * we are and we were the only connection outside. | 306 | * we are and we were the only connection outside. |
307 | */ | 307 | */ |
308 | parent = tsk->real_parent; | 308 | parent = tsk->real_parent; |
309 | else | 309 | else |
310 | /* reparent: our child is in a different pgrp than | 310 | /* reparent: our child is in a different pgrp than |
311 | * we are, and it was the only connection outside. | 311 | * we are, and it was the only connection outside. |
312 | */ | 312 | */ |
313 | ignored_task = NULL; | 313 | ignored_task = NULL; |
314 | 314 | ||
315 | if (task_pgrp(parent) != pgrp && | 315 | if (task_pgrp(parent) != pgrp && |
316 | task_session(parent) == task_session(tsk) && | 316 | task_session(parent) == task_session(tsk) && |
317 | will_become_orphaned_pgrp(pgrp, ignored_task) && | 317 | will_become_orphaned_pgrp(pgrp, ignored_task) && |
318 | has_stopped_jobs(pgrp)) { | 318 | has_stopped_jobs(pgrp)) { |
319 | __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp); | 319 | __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp); |
320 | __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp); | 320 | __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp); |
321 | } | 321 | } |
322 | } | 322 | } |
323 | 323 | ||
324 | /** | 324 | /** |
325 | * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd | 325 | * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd |
326 | * | 326 | * |
327 | * If a kernel thread is launched as a result of a system call, or if | 327 | * If a kernel thread is launched as a result of a system call, or if |
328 | * it ever exits, it should generally reparent itself to kthreadd so it | 328 | * it ever exits, it should generally reparent itself to kthreadd so it |
329 | * isn't in the way of other processes and is correctly cleaned up on exit. | 329 | * isn't in the way of other processes and is correctly cleaned up on exit. |
330 | * | 330 | * |
331 | * The various task state fields such as scheduling policy and priority may have | 331 | * The various task state fields such as scheduling policy and priority may have |
332 | * been inherited from a user process, so we reset them to sane values here. | 332 | * been inherited from a user process, so we reset them to sane values here. |
333 | * | 333 | * |
334 | * NOTE that reparent_to_kthreadd() gives the caller full capabilities. | 334 | * NOTE that reparent_to_kthreadd() gives the caller full capabilities. |
335 | */ | 335 | */ |
336 | static void reparent_to_kthreadd(void) | 336 | static void reparent_to_kthreadd(void) |
337 | { | 337 | { |
338 | write_lock_irq(&tasklist_lock); | 338 | write_lock_irq(&tasklist_lock); |
339 | 339 | ||
340 | ptrace_unlink(current); | 340 | ptrace_unlink(current); |
341 | /* Reparent to init */ | 341 | /* Reparent to init */ |
342 | current->real_parent = current->parent = kthreadd_task; | 342 | current->real_parent = current->parent = kthreadd_task; |
343 | list_move_tail(¤t->sibling, ¤t->real_parent->children); | 343 | list_move_tail(¤t->sibling, ¤t->real_parent->children); |
344 | 344 | ||
345 | /* Set the exit signal to SIGCHLD so we signal init on exit */ | 345 | /* Set the exit signal to SIGCHLD so we signal init on exit */ |
346 | current->exit_signal = SIGCHLD; | 346 | current->exit_signal = SIGCHLD; |
347 | 347 | ||
348 | if (task_nice(current) < 0) | 348 | if (task_nice(current) < 0) |
349 | set_user_nice(current, 0); | 349 | set_user_nice(current, 0); |
350 | /* cpus_allowed? */ | 350 | /* cpus_allowed? */ |
351 | /* rt_priority? */ | 351 | /* rt_priority? */ |
352 | /* signals? */ | 352 | /* signals? */ |
353 | memcpy(current->signal->rlim, init_task.signal->rlim, | 353 | memcpy(current->signal->rlim, init_task.signal->rlim, |
354 | sizeof(current->signal->rlim)); | 354 | sizeof(current->signal->rlim)); |
355 | 355 | ||
356 | atomic_inc(&init_cred.usage); | 356 | atomic_inc(&init_cred.usage); |
357 | commit_creds(&init_cred); | 357 | commit_creds(&init_cred); |
358 | write_unlock_irq(&tasklist_lock); | 358 | write_unlock_irq(&tasklist_lock); |
359 | } | 359 | } |
360 | 360 | ||
361 | void __set_special_pids(struct pid *pid) | 361 | void __set_special_pids(struct pid *pid) |
362 | { | 362 | { |
363 | struct task_struct *curr = current->group_leader; | 363 | struct task_struct *curr = current->group_leader; |
364 | 364 | ||
365 | if (task_session(curr) != pid) | 365 | if (task_session(curr) != pid) |
366 | change_pid(curr, PIDTYPE_SID, pid); | 366 | change_pid(curr, PIDTYPE_SID, pid); |
367 | 367 | ||
368 | if (task_pgrp(curr) != pid) | 368 | if (task_pgrp(curr) != pid) |
369 | change_pid(curr, PIDTYPE_PGID, pid); | 369 | change_pid(curr, PIDTYPE_PGID, pid); |
370 | } | 370 | } |
371 | 371 | ||
372 | static void set_special_pids(struct pid *pid) | 372 | static void set_special_pids(struct pid *pid) |
373 | { | 373 | { |
374 | write_lock_irq(&tasklist_lock); | 374 | write_lock_irq(&tasklist_lock); |
375 | __set_special_pids(pid); | 375 | __set_special_pids(pid); |
376 | write_unlock_irq(&tasklist_lock); | 376 | write_unlock_irq(&tasklist_lock); |
377 | } | 377 | } |
378 | 378 | ||
379 | /* | 379 | /* |
380 | * Let kernel threads use this to say that they allow a certain signal. | 380 | * Let kernel threads use this to say that they allow a certain signal. |
381 | * Must not be used if kthread was cloned with CLONE_SIGHAND. | 381 | * Must not be used if kthread was cloned with CLONE_SIGHAND. |
382 | */ | 382 | */ |
383 | int allow_signal(int sig) | 383 | int allow_signal(int sig) |
384 | { | 384 | { |
385 | if (!valid_signal(sig) || sig < 1) | 385 | if (!valid_signal(sig) || sig < 1) |
386 | return -EINVAL; | 386 | return -EINVAL; |
387 | 387 | ||
388 | spin_lock_irq(¤t->sighand->siglock); | 388 | spin_lock_irq(¤t->sighand->siglock); |
389 | /* This is only needed for daemonize()'ed kthreads */ | 389 | /* This is only needed for daemonize()'ed kthreads */ |
390 | sigdelset(¤t->blocked, sig); | 390 | sigdelset(¤t->blocked, sig); |
391 | /* | 391 | /* |
392 | * Kernel threads handle their own signals. Let the signal code | 392 | * Kernel threads handle their own signals. Let the signal code |
393 | * know it'll be handled, so that they don't get converted to | 393 | * know it'll be handled, so that they don't get converted to |
394 | * SIGKILL or just silently dropped. | 394 | * SIGKILL or just silently dropped. |
395 | */ | 395 | */ |
396 | current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2; | 396 | current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2; |
397 | recalc_sigpending(); | 397 | recalc_sigpending(); |
398 | spin_unlock_irq(¤t->sighand->siglock); | 398 | spin_unlock_irq(¤t->sighand->siglock); |
399 | return 0; | 399 | return 0; |
400 | } | 400 | } |
401 | 401 | ||
402 | EXPORT_SYMBOL(allow_signal); | 402 | EXPORT_SYMBOL(allow_signal); |
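In practice allow_signal() is paired with signal_pending() in a kernel thread's main loop. A hedged in-kernel sketch (the module scaffolding and names are illustrative, not taken from this file):

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct task_struct *worker;

static int worker_fn(void *unused)
{
	allow_signal(SIGTERM);	/* opt in: don't convert it to SIGKILL */

	while (!kthread_should_stop()) {
		if (signal_pending(current))
			flush_signals(current);	/* SIGTERM is just a poke */
		msleep_interruptible(1000);	/* signals cut the sleep short */
	}
	return 0;
}

static int __init demo_init(void)
{
	worker = kthread_run(worker_fn, NULL, "allow_signal_demo");
	return IS_ERR(worker) ? PTR_ERR(worker) : 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(worker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");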
403 | 403 | ||
404 | int disallow_signal(int sig) | 404 | int disallow_signal(int sig) |
405 | { | 405 | { |
406 | if (!valid_signal(sig) || sig < 1) | 406 | if (!valid_signal(sig) || sig < 1) |
407 | return -EINVAL; | 407 | return -EINVAL; |
408 | 408 | ||
409 | spin_lock_irq(¤t->sighand->siglock); | 409 | spin_lock_irq(¤t->sighand->siglock); |
410 | current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN; | 410 | current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN; |
411 | recalc_sigpending(); | 411 | recalc_sigpending(); |
412 | spin_unlock_irq(¤t->sighand->siglock); | 412 | spin_unlock_irq(¤t->sighand->siglock); |
413 | return 0; | 413 | return 0; |
414 | } | 414 | } |
415 | 415 | ||
416 | EXPORT_SYMBOL(disallow_signal); | 416 | EXPORT_SYMBOL(disallow_signal); |
417 | 417 | ||
418 | /* | 418 | /* |
419 | * Put all the gunge required to become a kernel thread without | 419 | * Put all the gunge required to become a kernel thread without |
420 | * attached user resources in one place where it belongs. | 420 | * attached user resources in one place where it belongs. |
421 | */ | 421 | */ |
422 | 422 | ||
423 | void daemonize(const char *name, ...) | 423 | void daemonize(const char *name, ...) |
424 | { | 424 | { |
425 | va_list args; | 425 | va_list args; |
426 | sigset_t blocked; | 426 | sigset_t blocked; |
427 | 427 | ||
428 | va_start(args, name); | 428 | va_start(args, name); |
429 | vsnprintf(current->comm, sizeof(current->comm), name, args); | 429 | vsnprintf(current->comm, sizeof(current->comm), name, args); |
430 | va_end(args); | 430 | va_end(args); |
431 | 431 | ||
432 | /* | 432 | /* |
433 | * If we were started as result of loading a module, close all of the | 433 | * If we were started as result of loading a module, close all of the |
434 | * user space pages. We don't need them, and if we didn't close them | 434 | * user space pages. We don't need them, and if we didn't close them |
435 | * they would be locked into memory. | 435 | * they would be locked into memory. |
436 | */ | 436 | */ |
437 | exit_mm(current); | 437 | exit_mm(current); |
438 | /* | 438 | /* |
439 | * We don't want to have TIF_FREEZE set if the system-wide hibernation | 439 | * We don't want to have TIF_FREEZE set if the system-wide hibernation |
440 | * or suspend transition begins right now. | 440 | * or suspend transition begins right now. |
441 | */ | 441 | */ |
442 | current->flags |= (PF_NOFREEZE | PF_KTHREAD); | 442 | current->flags |= (PF_NOFREEZE | PF_KTHREAD); |
443 | 443 | ||
444 | if (current->nsproxy != &init_nsproxy) { | 444 | if (current->nsproxy != &init_nsproxy) { |
445 | get_nsproxy(&init_nsproxy); | 445 | get_nsproxy(&init_nsproxy); |
446 | switch_task_namespaces(current, &init_nsproxy); | 446 | switch_task_namespaces(current, &init_nsproxy); |
447 | } | 447 | } |
448 | set_special_pids(&init_struct_pid); | 448 | set_special_pids(&init_struct_pid); |
449 | proc_clear_tty(current); | 449 | proc_clear_tty(current); |
450 | 450 | ||
451 | /* Block and flush all signals */ | 451 | /* Block and flush all signals */ |
452 | sigfillset(&blocked); | 452 | sigfillset(&blocked); |
453 | sigprocmask(SIG_BLOCK, &blocked, NULL); | 453 | sigprocmask(SIG_BLOCK, &blocked, NULL); |
454 | flush_signals(current); | 454 | flush_signals(current); |
455 | 455 | ||
456 | /* Become as one with the init task */ | 456 | /* Become as one with the init task */ |
457 | 457 | ||
458 | daemonize_fs_struct(); | 458 | daemonize_fs_struct(); |
459 | exit_files(current); | 459 | exit_files(current); |
460 | current->files = init_task.files; | 460 | current->files = init_task.files; |
461 | atomic_inc(¤t->files->count); | 461 | atomic_inc(¤t->files->count); |
462 | 462 | ||
463 | reparent_to_kthreadd(); | 463 | reparent_to_kthreadd(); |
464 | } | 464 | } |
465 | 465 | ||
466 | EXPORT_SYMBOL(daemonize); | 466 | EXPORT_SYMBOL(daemonize); |
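For context, daemonize() is the legacy entry point for threads spawned with kernel_thread(); threads created through the kthread API never need it. A hedged usage sketch (names illustrative):

#include <linux/delay.h>
#include <linux/sched.h>

static int legacy_worker(void *unused)
{
	daemonize("legacy-worker");	/* shed files, fs, namespaces, tty */
	allow_signal(SIGKILL);		/* daemonize() blocked everything */

	while (!signal_pending(current))
		msleep_interruptible(1000);
	return 0;
}

/* Started the old way, e.g.:
 *	kernel_thread(legacy_worker, NULL, CLONE_FS | CLONE_FILES);
 * New code should prefer kthread_run() and skip daemonize() entirely. */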
467 | 467 | ||
468 | static void close_files(struct files_struct * files) | 468 | static void close_files(struct files_struct * files) |
469 | { | 469 | { |
470 | int i, j; | 470 | int i, j; |
471 | struct fdtable *fdt; | 471 | struct fdtable *fdt; |
472 | 472 | ||
473 | j = 0; | 473 | j = 0; |
474 | 474 | ||
475 | /* | 475 | /* |
476 | * It is safe to dereference the fd table without RCU or | 476 | * It is safe to dereference the fd table without RCU or |
477 | * ->file_lock because this is the last reference to the | 477 | * ->file_lock because this is the last reference to the |
478 | * files structure. But use RCU to shut RCU-lockdep up. | 478 | * files structure. But use RCU to shut RCU-lockdep up. |
479 | */ | 479 | */ |
480 | rcu_read_lock(); | 480 | rcu_read_lock(); |
481 | fdt = files_fdtable(files); | 481 | fdt = files_fdtable(files); |
482 | rcu_read_unlock(); | 482 | rcu_read_unlock(); |
483 | for (;;) { | 483 | for (;;) { |
484 | unsigned long set; | 484 | unsigned long set; |
485 | i = j * __NFDBITS; | 485 | i = j * __NFDBITS; |
486 | if (i >= fdt->max_fds) | 486 | if (i >= fdt->max_fds) |
487 | break; | 487 | break; |
488 | set = fdt->open_fds->fds_bits[j++]; | 488 | set = fdt->open_fds->fds_bits[j++]; |
489 | while (set) { | 489 | while (set) { |
490 | if (set & 1) { | 490 | if (set & 1) { |
491 | struct file * file = xchg(&fdt->fd[i], NULL); | 491 | struct file * file = xchg(&fdt->fd[i], NULL); |
492 | if (file) { | 492 | if (file) { |
493 | filp_close(file, files); | 493 | filp_close(file, files); |
494 | cond_resched(); | 494 | cond_resched(); |
495 | } | 495 | } |
496 | } | 496 | } |
497 | i++; | 497 | i++; |
498 | set >>= 1; | 498 | set >>= 1; |
499 | } | 499 | } |
500 | } | 500 | } |
501 | } | 501 | } |
502 | 502 | ||
503 | struct files_struct *get_files_struct(struct task_struct *task) | 503 | struct files_struct *get_files_struct(struct task_struct *task) |
504 | { | 504 | { |
505 | struct files_struct *files; | 505 | struct files_struct *files; |
506 | 506 | ||
507 | task_lock(task); | 507 | task_lock(task); |
508 | files = task->files; | 508 | files = task->files; |
509 | if (files) | 509 | if (files) |
510 | atomic_inc(&files->count); | 510 | atomic_inc(&files->count); |
511 | task_unlock(task); | 511 | task_unlock(task); |
512 | 512 | ||
513 | return files; | 513 | return files; |
514 | } | 514 | } |
515 | 515 | ||
516 | void put_files_struct(struct files_struct *files) | 516 | void put_files_struct(struct files_struct *files) |
517 | { | 517 | { |
518 | struct fdtable *fdt; | 518 | struct fdtable *fdt; |
519 | 519 | ||
520 | if (atomic_dec_and_test(&files->count)) { | 520 | if (atomic_dec_and_test(&files->count)) { |
521 | close_files(files); | 521 | close_files(files); |
522 | /* | 522 | /* |
523 | * Free the fd and fdset arrays if we expanded them. | 523 | * Free the fd and fdset arrays if we expanded them. |
524 | * If the fdtable was embedded, pass files for freeing | 524 | * If the fdtable was embedded, pass files for freeing |
525 | * at the end of the RCU grace period. Otherwise, | 525 | * at the end of the RCU grace period. Otherwise, |
526 | * you can free files immediately. | 526 | * you can free files immediately. |
527 | */ | 527 | */ |
528 | rcu_read_lock(); | 528 | rcu_read_lock(); |
529 | fdt = files_fdtable(files); | 529 | fdt = files_fdtable(files); |
530 | if (fdt != &files->fdtab) | 530 | if (fdt != &files->fdtab) |
531 | kmem_cache_free(files_cachep, files); | 531 | kmem_cache_free(files_cachep, files); |
532 | free_fdtable(fdt); | 532 | free_fdtable(fdt); |
533 | rcu_read_unlock(); | 533 | rcu_read_unlock(); |
534 | } | 534 | } |
535 | } | 535 | } |
536 | 536 | ||
537 | void reset_files_struct(struct files_struct *files) | 537 | void reset_files_struct(struct files_struct *files) |
538 | { | 538 | { |
539 | struct task_struct *tsk = current; | 539 | struct task_struct *tsk = current; |
540 | struct files_struct *old; | 540 | struct files_struct *old; |
541 | 541 | ||
542 | old = tsk->files; | 542 | old = tsk->files; |
543 | task_lock(tsk); | 543 | task_lock(tsk); |
544 | tsk->files = files; | 544 | tsk->files = files; |
545 | task_unlock(tsk); | 545 | task_unlock(tsk); |
546 | put_files_struct(old); | 546 | put_files_struct(old); |
547 | } | 547 | } |
548 | 548 | ||
549 | void exit_files(struct task_struct *tsk) | 549 | void exit_files(struct task_struct *tsk) |
550 | { | 550 | { |
551 | struct files_struct * files = tsk->files; | 551 | struct files_struct * files = tsk->files; |
552 | 552 | ||
553 | if (files) { | 553 | if (files) { |
554 | task_lock(tsk); | 554 | task_lock(tsk); |
555 | tsk->files = NULL; | 555 | tsk->files = NULL; |
556 | task_unlock(tsk); | 556 | task_unlock(tsk); |
557 | put_files_struct(files); | 557 | put_files_struct(files); |
558 | } | 558 | } |
559 | } | 559 | } |
560 | 560 | ||
561 | #ifdef CONFIG_MM_OWNER | 561 | #ifdef CONFIG_MM_OWNER |
562 | /* | 562 | /* |
563 | * Task p is exiting and it owned mm, lets find a new owner for it | 563 | * Task p is exiting and it owned mm, lets find a new owner for it |
564 | */ | 564 | */ |
565 | static inline int | 565 | static inline int |
566 | mm_need_new_owner(struct mm_struct *mm, struct task_struct *p) | 566 | mm_need_new_owner(struct mm_struct *mm, struct task_struct *p) |
567 | { | 567 | { |
568 | /* | 568 | /* |
569 | * If there are other users of the mm and the owner (us) is exiting | 569 | * If there are other users of the mm and the owner (us) is exiting |
570 | * we need to find a new owner to take on the responsibility. | 570 | * we need to find a new owner to take on the responsibility. |
571 | */ | 571 | */ |
572 | if (atomic_read(&mm->mm_users) <= 1) | 572 | if (atomic_read(&mm->mm_users) <= 1) |
573 | return 0; | 573 | return 0; |
574 | if (mm->owner != p) | 574 | if (mm->owner != p) |
575 | return 0; | 575 | return 0; |
576 | return 1; | 576 | return 1; |
577 | } | 577 | } |
578 | 578 | ||
579 | void mm_update_next_owner(struct mm_struct *mm) | 579 | void mm_update_next_owner(struct mm_struct *mm) |
580 | { | 580 | { |
581 | struct task_struct *c, *g, *p = current; | 581 | struct task_struct *c, *g, *p = current; |
582 | 582 | ||
583 | retry: | 583 | retry: |
584 | if (!mm_need_new_owner(mm, p)) | 584 | if (!mm_need_new_owner(mm, p)) |
585 | return; | 585 | return; |
586 | 586 | ||
587 | read_lock(&tasklist_lock); | 587 | read_lock(&tasklist_lock); |
588 | /* | 588 | /* |
589 | * Search in the children | 589 | * Search in the children |
590 | */ | 590 | */ |
591 | list_for_each_entry(c, &p->children, sibling) { | 591 | list_for_each_entry(c, &p->children, sibling) { |
592 | if (c->mm == mm) | 592 | if (c->mm == mm) |
593 | goto assign_new_owner; | 593 | goto assign_new_owner; |
594 | } | 594 | } |
595 | 595 | ||
596 | /* | 596 | /* |
597 | * Search in the siblings | 597 | * Search in the siblings |
598 | */ | 598 | */ |
599 | list_for_each_entry(c, &p->real_parent->children, sibling) { | 599 | list_for_each_entry(c, &p->real_parent->children, sibling) { |
600 | if (c->mm == mm) | 600 | if (c->mm == mm) |
601 | goto assign_new_owner; | 601 | goto assign_new_owner; |
602 | } | 602 | } |
603 | 603 | ||
604 | /* | 604 | /* |
605 | * Search through everything else. We should not get | 605 | * Search through everything else. We should not get |
606 | * here often. | 606 | * here often. |
607 | */ | 607 | */ |
608 | do_each_thread(g, c) { | 608 | do_each_thread(g, c) { |
609 | if (c->mm == mm) | 609 | if (c->mm == mm) |
610 | goto assign_new_owner; | 610 | goto assign_new_owner; |
611 | } while_each_thread(g, c); | 611 | } while_each_thread(g, c); |
612 | 612 | ||
613 | read_unlock(&tasklist_lock); | 613 | read_unlock(&tasklist_lock); |
614 | /* | 614 | /* |
615 | * We found no owner, yet mm_users > 1: this implies that we are | 615 | * We found no owner, yet mm_users > 1: this implies that we are |
616 | * most likely racing with swapoff (try_to_unuse()) or /proc or | 616 | * most likely racing with swapoff (try_to_unuse()) or /proc or |
617 | * ptrace or page migration (get_task_mm()). Mark owner as NULL. | 617 | * ptrace or page migration (get_task_mm()). Mark owner as NULL. |
618 | */ | 618 | */ |
619 | mm->owner = NULL; | 619 | mm->owner = NULL; |
620 | return; | 620 | return; |
621 | 621 | ||
622 | assign_new_owner: | 622 | assign_new_owner: |
623 | BUG_ON(c == p); | 623 | BUG_ON(c == p); |
624 | get_task_struct(c); | 624 | get_task_struct(c); |
625 | /* | 625 | /* |
626 | * The task_lock protects c->mm from changing. | 626 | * The task_lock protects c->mm from changing. |
627 | * We always want mm->owner->mm == mm | 627 | * We always want mm->owner->mm == mm |
628 | */ | 628 | */ |
629 | task_lock(c); | 629 | task_lock(c); |
630 | /* | 630 | /* |
631 | * Delay read_unlock() till we have the task_lock() | 631 | * Delay read_unlock() till we have the task_lock() |
632 | * to ensure that c does not slip away underneath us | 632 | * to ensure that c does not slip away underneath us |
633 | */ | 633 | */ |
634 | read_unlock(&tasklist_lock); | 634 | read_unlock(&tasklist_lock); |
635 | if (c->mm != mm) { | 635 | if (c->mm != mm) { |
636 | task_unlock(c); | 636 | task_unlock(c); |
637 | put_task_struct(c); | 637 | put_task_struct(c); |
638 | goto retry; | 638 | goto retry; |
639 | } | 639 | } |
640 | mm->owner = c; | 640 | mm->owner = c; |
641 | task_unlock(c); | 641 | task_unlock(c); |
642 | put_task_struct(c); | 642 | put_task_struct(c); |
643 | } | 643 | } |
644 | #endif /* CONFIG_MM_OWNER */ | 644 | #endif /* CONFIG_MM_OWNER */ |
645 | 645 | ||
646 | /* | 646 | /* |
647 | * Turn us into a lazy TLB process if we | 647 | * Turn us into a lazy TLB process if we |
648 | * aren't already. | 648 | * aren't already. |
649 | */ | 649 | */ |
650 | static void exit_mm(struct task_struct * tsk) | 650 | static void exit_mm(struct task_struct * tsk) |
651 | { | 651 | { |
652 | struct mm_struct *mm = tsk->mm; | 652 | struct mm_struct *mm = tsk->mm; |
653 | struct core_state *core_state; | 653 | struct core_state *core_state; |
654 | 654 | ||
655 | mm_release(tsk, mm); | 655 | mm_release(tsk, mm); |
656 | if (!mm) | 656 | if (!mm) |
657 | return; | 657 | return; |
658 | /* | 658 | /* |
659 | * Serialize with any possible pending coredump. | 659 | * Serialize with any possible pending coredump. |
660 | * We must hold mmap_sem around checking core_state | 660 | * We must hold mmap_sem around checking core_state |
661 | * and clearing tsk->mm. The core-inducing thread | 661 | * and clearing tsk->mm. The core-inducing thread |
662 | * will increment ->nr_threads for each thread in the | 662 | * will increment ->nr_threads for each thread in the |
663 | * group with ->mm != NULL. | 663 | * group with ->mm != NULL. |
664 | */ | 664 | */ |
665 | down_read(&mm->mmap_sem); | 665 | down_read(&mm->mmap_sem); |
666 | core_state = mm->core_state; | 666 | core_state = mm->core_state; |
667 | if (core_state) { | 667 | if (core_state) { |
668 | struct core_thread self; | 668 | struct core_thread self; |
669 | up_read(&mm->mmap_sem); | 669 | up_read(&mm->mmap_sem); |
670 | 670 | ||
671 | self.task = tsk; | 671 | self.task = tsk; |
672 | self.next = xchg(&core_state->dumper.next, &self); | 672 | self.next = xchg(&core_state->dumper.next, &self); |
673 | /* | 673 | /* |
674 | * Implies mb(), the result of xchg() must be visible | 674 | * Implies mb(), the result of xchg() must be visible |
675 | * to core_state->dumper. | 675 | * to core_state->dumper. |
676 | */ | 676 | */ |
677 | if (atomic_dec_and_test(&core_state->nr_threads)) | 677 | if (atomic_dec_and_test(&core_state->nr_threads)) |
678 | complete(&core_state->startup); | 678 | complete(&core_state->startup); |
679 | 679 | ||
680 | for (;;) { | 680 | for (;;) { |
681 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | 681 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); |
682 | if (!self.task) /* see coredump_finish() */ | 682 | if (!self.task) /* see coredump_finish() */ |
683 | break; | 683 | break; |
684 | schedule(); | 684 | schedule(); |
685 | } | 685 | } |
686 | __set_task_state(tsk, TASK_RUNNING); | 686 | __set_task_state(tsk, TASK_RUNNING); |
687 | down_read(&mm->mmap_sem); | 687 | down_read(&mm->mmap_sem); |
688 | } | 688 | } |
689 | atomic_inc(&mm->mm_count); | 689 | atomic_inc(&mm->mm_count); |
690 | BUG_ON(mm != tsk->active_mm); | 690 | BUG_ON(mm != tsk->active_mm); |
691 | /* more a memory barrier than a real lock */ | 691 | /* more a memory barrier than a real lock */ |
692 | task_lock(tsk); | 692 | task_lock(tsk); |
693 | tsk->mm = NULL; | 693 | tsk->mm = NULL; |
694 | up_read(&mm->mmap_sem); | 694 | up_read(&mm->mmap_sem); |
695 | enter_lazy_tlb(mm, current); | 695 | enter_lazy_tlb(mm, current); |
696 | /* We don't want this task to be frozen prematurely */ | 696 | /* We don't want this task to be frozen prematurely */ |
697 | clear_freeze_flag(tsk); | 697 | clear_freeze_flag(tsk); |
698 | if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) | 698 | if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) |
699 | atomic_dec(&mm->oom_disable_count); | 699 | atomic_dec(&mm->oom_disable_count); |
700 | task_unlock(tsk); | 700 | task_unlock(tsk); |
701 | mm_update_next_owner(mm); | 701 | mm_update_next_owner(mm); |
702 | mmput(mm); | 702 | mmput(mm); |
703 | } | 703 | } |
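The coredump rendezvous above enlists the exiting thread with a single xchg(): a lock-free push of `self` onto the core_state->dumper list. The same idiom in portable C11 (standalone sketch; the struct and names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct node {
	struct node *next;
	int id;
};

static _Atomic(struct node *) head;	/* NULL means empty */

/* Push without locks: atomically swap ourselves in and keep the old
 * head as our ->next, exactly the shape of the xchg() in exit_mm().
 * As in exit_mm(), a consumer must synchronize separately (there via
 * core_state->nr_threads) before walking the list. */
static void push(struct node *n)
{
	n->next = atomic_exchange(&head, n);
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 };
	push(&a);
	push(&b);
	for (struct node *n = atomic_load(&head); n; n = n->next)
		printf("node %d\n", n->id);	/* prints 2, then 1 */
	return 0;
}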
704 | 704 | ||
705 | /* | 705 | /* |
706 | * When we die, we re-parent all our children. | 706 | * When we die, we re-parent all our children. |
707 | * Try to give them to another thread in our thread | 707 | * Try to give them to another thread in our thread |
708 | * group, and if no such member exists, give it to | 708 | * group, and if no such member exists, give it to |
709 | * the child reaper process (i.e. "init") in our pid | 709 | * the child reaper process (i.e. "init") in our pid |
710 | * space. | 710 | * space. |
711 | */ | 711 | */ |
712 | static struct task_struct *find_new_reaper(struct task_struct *father) | 712 | static struct task_struct *find_new_reaper(struct task_struct *father) |
713 | __releases(&tasklist_lock) | 713 | __releases(&tasklist_lock) |
714 | __acquires(&tasklist_lock) | 714 | __acquires(&tasklist_lock) |
715 | { | 715 | { |
716 | struct pid_namespace *pid_ns = task_active_pid_ns(father); | 716 | struct pid_namespace *pid_ns = task_active_pid_ns(father); |
717 | struct task_struct *thread; | 717 | struct task_struct *thread; |
718 | 718 | ||
719 | thread = father; | 719 | thread = father; |
720 | while_each_thread(father, thread) { | 720 | while_each_thread(father, thread) { |
721 | if (thread->flags & PF_EXITING) | 721 | if (thread->flags & PF_EXITING) |
722 | continue; | 722 | continue; |
723 | if (unlikely(pid_ns->child_reaper == father)) | 723 | if (unlikely(pid_ns->child_reaper == father)) |
724 | pid_ns->child_reaper = thread; | 724 | pid_ns->child_reaper = thread; |
725 | return thread; | 725 | return thread; |
726 | } | 726 | } |
727 | 727 | ||
728 | if (unlikely(pid_ns->child_reaper == father)) { | 728 | if (unlikely(pid_ns->child_reaper == father)) { |
729 | write_unlock_irq(&tasklist_lock); | 729 | write_unlock_irq(&tasklist_lock); |
730 | if (unlikely(pid_ns == &init_pid_ns)) | 730 | if (unlikely(pid_ns == &init_pid_ns)) |
731 | panic("Attempted to kill init!"); | 731 | panic("Attempted to kill init!"); |
732 | 732 | ||
733 | zap_pid_ns_processes(pid_ns); | 733 | zap_pid_ns_processes(pid_ns); |
734 | write_lock_irq(&tasklist_lock); | 734 | write_lock_irq(&tasklist_lock); |
735 | /* | 735 | /* |
736 | * We can not clear ->child_reaper or leave it alone. | 736 | * We can not clear ->child_reaper or leave it alone. |
737 | * There may be stealth EXIT_DEAD tasks on ->children, | 737 | * There may be stealth EXIT_DEAD tasks on ->children, |
738 | * forget_original_parent() must move them somewhere. | 738 | * forget_original_parent() must move them somewhere. |
739 | */ | 739 | */ |
740 | pid_ns->child_reaper = init_pid_ns.child_reaper; | 740 | pid_ns->child_reaper = init_pid_ns.child_reaper; |
741 | } | 741 | } |
742 | 742 | ||
743 | return pid_ns->child_reaper; | 743 | return pid_ns->child_reaper; |
744 | } | 744 | } |
745 | 745 | ||
746 | /* | 746 | /* |
747 | * Any that need to be release_task'd are put on the @dead list. | 747 | * Any that need to be release_task'd are put on the @dead list. |
748 | */ | 748 | */ |
749 | static void reparent_leader(struct task_struct *father, struct task_struct *p, | 749 | static void reparent_leader(struct task_struct *father, struct task_struct *p, |
750 | struct list_head *dead) | 750 | struct list_head *dead) |
751 | { | 751 | { |
752 | list_move_tail(&p->sibling, &p->real_parent->children); | 752 | list_move_tail(&p->sibling, &p->real_parent->children); |
753 | 753 | ||
754 | if (task_detached(p)) | 754 | if (task_detached(p)) |
755 | return; | 755 | return; |
756 | /* | 756 | /* |
757 | * If this is a threaded reparent there is no need to | 757 | * If this is a threaded reparent there is no need to |
758 | * notify anyone anything has happened. | 758 | * notify anyone anything has happened. |
759 | */ | 759 | */ |
760 | if (same_thread_group(p->real_parent, father)) | 760 | if (same_thread_group(p->real_parent, father)) |
761 | return; | 761 | return; |
762 | 762 | ||
763 | /* We don't want people slaying init. */ | 763 | /* We don't want people slaying init. */ |
764 | p->exit_signal = SIGCHLD; | 764 | p->exit_signal = SIGCHLD; |
765 | 765 | ||
766 | /* If it has exited notify the new parent about this child's death. */ | 766 | /* If it has exited notify the new parent about this child's death. */ |
767 | if (!p->ptrace && | 767 | if (!p->ptrace && |
768 | p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) { | 768 | p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) { |
769 | do_notify_parent(p, p->exit_signal); | 769 | do_notify_parent(p, p->exit_signal); |
770 | if (task_detached(p)) { | 770 | if (task_detached(p)) { |
771 | p->exit_state = EXIT_DEAD; | 771 | p->exit_state = EXIT_DEAD; |
772 | list_move_tail(&p->sibling, dead); | 772 | list_move_tail(&p->sibling, dead); |
773 | } | 773 | } |
774 | } | 774 | } |
775 | 775 | ||
776 | kill_orphaned_pgrp(p, father); | 776 | kill_orphaned_pgrp(p, father); |
777 | } | 777 | } |
778 | 778 | ||
779 | static void forget_original_parent(struct task_struct *father) | 779 | static void forget_original_parent(struct task_struct *father) |
780 | { | 780 | { |
781 | struct task_struct *p, *n, *reaper; | 781 | struct task_struct *p, *n, *reaper; |
782 | LIST_HEAD(dead_children); | 782 | LIST_HEAD(dead_children); |
783 | 783 | ||
784 | write_lock_irq(&tasklist_lock); | 784 | write_lock_irq(&tasklist_lock); |
785 | /* | 785 | /* |
786 | * Note that exit_ptrace() and find_new_reaper() might | 786 | * Note that exit_ptrace() and find_new_reaper() might |
787 | * drop tasklist_lock and reacquire it. | 787 | * drop tasklist_lock and reacquire it. |
788 | */ | 788 | */ |
789 | exit_ptrace(father); | 789 | exit_ptrace(father); |
790 | reaper = find_new_reaper(father); | 790 | reaper = find_new_reaper(father); |
791 | 791 | ||
792 | list_for_each_entry_safe(p, n, &father->children, sibling) { | 792 | list_for_each_entry_safe(p, n, &father->children, sibling) { |
793 | struct task_struct *t = p; | 793 | struct task_struct *t = p; |
794 | do { | 794 | do { |
795 | t->real_parent = reaper; | 795 | t->real_parent = reaper; |
796 | if (t->parent == father) { | 796 | if (t->parent == father) { |
797 | BUG_ON(t->ptrace); | 797 | BUG_ON(t->ptrace); |
798 | t->parent = t->real_parent; | 798 | t->parent = t->real_parent; |
799 | } | 799 | } |
800 | if (t->pdeath_signal) | 800 | if (t->pdeath_signal) |
801 | group_send_sig_info(t->pdeath_signal, | 801 | group_send_sig_info(t->pdeath_signal, |
802 | SEND_SIG_NOINFO, t); | 802 | SEND_SIG_NOINFO, t); |
803 | } while_each_thread(p, t); | 803 | } while_each_thread(p, t); |
804 | reparent_leader(father, p, &dead_children); | 804 | reparent_leader(father, p, &dead_children); |
805 | } | 805 | } |
806 | write_unlock_irq(&tasklist_lock); | 806 | write_unlock_irq(&tasklist_lock); |
807 | 807 | ||
808 | BUG_ON(!list_empty(&father->children)); | 808 | BUG_ON(!list_empty(&father->children)); |
809 | 809 | ||
810 | list_for_each_entry_safe(p, n, &dead_children, sibling) { | 810 | list_for_each_entry_safe(p, n, &dead_children, sibling) { |
811 | list_del_init(&p->sibling); | 811 | list_del_init(&p->sibling); |
812 | release_task(p); | 812 | release_task(p); |
813 | } | 813 | } |
814 | } | 814 | } |
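The pdeath_signal delivery in the loop above is the kernel side of prctl(PR_SET_PDEATHSIG). A userspace sketch of the round trip (illustrative; note the signal fires when the parent thread exits, and the getppid() check papers over the obvious race):

#include <signal.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <unistd.h>

static void on_parent_death(int sig)
{
	const char msg[] = "child: parent died\n";
	write(STDOUT_FILENO, msg, sizeof(msg) - 1);
	_exit(0);
}

int main(void)
{
	if (fork() == 0) {
		signal(SIGUSR1, on_parent_death);
		prctl(PR_SET_PDEATHSIG, SIGUSR1);
		if (getppid() == 1)	/* parent already gone? */
			_exit(1);
		pause();		/* wait to be signalled */
	}
	sleep(1);			/* then exit: forget_original_parent()
					 * delivers SIGUSR1 to the child */
	return 0;
}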
815 | 815 | ||
816 | /* | 816 | /* |
817 | * Send signals to all our closest relatives so that they know | 817 | * Send signals to all our closest relatives so that they know |
818 | * to properly mourn us. | 818 | * to properly mourn us. |
819 | */ | 819 | */ |
820 | static void exit_notify(struct task_struct *tsk, int group_dead) | 820 | static void exit_notify(struct task_struct *tsk, int group_dead) |
821 | { | 821 | { |
822 | int signal; | 822 | int signal; |
823 | bool autoreap; | ||
823 | void *cookie; | 824 | void *cookie; |
824 | 825 | ||
825 | /* | 826 | /* |
826 | * This does two things: | 827 | * This does two things: |
827 | * | 828 | * |
828 | * A. Make init inherit all the child processes | 829 | * A. Make init inherit all the child processes |
829 | * B. Check to see if any process groups have become orphaned | 830 | * B. Check to see if any process groups have become orphaned |
830 | * as a result of our exiting, and if they have any stopped | 831 | * as a result of our exiting, and if they have any stopped |
831 | * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) | 832 | * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) |
832 | */ | 833 | */ |
833 | forget_original_parent(tsk); | 834 | forget_original_parent(tsk); |
834 | exit_task_namespaces(tsk); | 835 | exit_task_namespaces(tsk); |
835 | 836 | ||
836 | write_lock_irq(&tasklist_lock); | 837 | write_lock_irq(&tasklist_lock); |
837 | if (group_dead) | 838 | if (group_dead) |
838 | kill_orphaned_pgrp(tsk->group_leader, NULL); | 839 | kill_orphaned_pgrp(tsk->group_leader, NULL); |
839 | 840 | ||
840 | /* Let father know we died | 841 | /* Let father know we died |
841 | * | 842 | * |
842 | * Thread signals are configurable, but you aren't going to use | 843 | * Thread signals are configurable, but you aren't going to use |
843 | * that to send signals to arbitrary processes. | 844 | * that to send signals to arbitrary processes. |
844 | * That stops right now. | 845 | * That stops right now. |
845 | * | 846 | * |
846 | * If the parent exec id doesn't match the exec id we saved | 847 | * If the parent exec id doesn't match the exec id we saved |
847 | * when we started then we know the parent has changed security | 848 | * when we started then we know the parent has changed security |
848 | * domain. | 849 | * domain. |
849 | * | 850 | * |
850 | * If our self_exec id doesn't match our parent_exec_id then | 851 | * If our self_exec id doesn't match our parent_exec_id then |
851 | * we have changed execution domain as these two values started | 852 | * we have changed execution domain as these two values started |
852 | * the same after a fork. | 853 | * the same after a fork. |
853 | */ | 854 | */ |
854 | if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) && | 855 | if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) && |
855 | (tsk->parent_exec_id != tsk->real_parent->self_exec_id || | 856 | (tsk->parent_exec_id != tsk->real_parent->self_exec_id || |
856 | tsk->self_exec_id != tsk->parent_exec_id)) | 857 | tsk->self_exec_id != tsk->parent_exec_id)) |
857 | tsk->exit_signal = SIGCHLD; | 858 | tsk->exit_signal = SIGCHLD; |
858 | 859 | ||
859 | signal = tracehook_notify_death(tsk, &cookie, group_dead); | 860 | signal = tracehook_notify_death(tsk, &cookie, group_dead); |
860 | if (signal >= 0) | 861 | if (signal >= 0) |
861 | signal = do_notify_parent(tsk, signal); | 862 | autoreap = do_notify_parent(tsk, signal); |
863 | else | ||
864 | autoreap = (signal == DEATH_REAP); | ||
862 | 865 | ||
863 | tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE; | 866 | tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE; |
864 | 867 | ||
865 | /* mt-exec, de_thread() is waiting for group leader */ | 868 | /* mt-exec, de_thread() is waiting for group leader */ |
866 | if (unlikely(tsk->signal->notify_count < 0)) | 869 | if (unlikely(tsk->signal->notify_count < 0)) |
867 | wake_up_process(tsk->signal->group_exit_task); | 870 | wake_up_process(tsk->signal->group_exit_task); |
868 | write_unlock_irq(&tasklist_lock); | 871 | write_unlock_irq(&tasklist_lock); |
869 | 872 | ||
870 | /* If the process is dead, release it - nobody will wait for it */ | 873 | /* If the process is dead, release it - nobody will wait for it */ |
871 | if (signal == DEATH_REAP) | 874 | if (autoreap) |
872 | release_task(tsk); | 875 | release_task(tsk); |
873 | } | 876 | } |
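This hunk is the point of the commit: do_notify_parent(), which lives in kernel/signal.c and is not shown in this diff, now returns a boolean that is true when the parent ignores SIGCHLD and the child should therefore be auto-reaped rather than left as a zombie for wait(). A compressed, illustrative restatement of the decision that return value encodes (the real function also covers ptrace and the DEATH_REAP bookkeeping):

#include <linux/sched.h>
#include <linux/signal.h>

/* Hypothetical helper, for illustration only: true if a SIGCHLD sent
 * to @parent would be discarded, i.e. its sa_handler is SIG_IGN or the
 * handler was installed with SA_NOCLDWAIT. */
static bool parent_ignores_sigchld(struct task_struct *parent, int sig)
{
	struct sighand_struct *sh = parent->sighand;
	bool autoreap = false;

	spin_lock_irq(&sh->siglock);
	if (sig == SIGCHLD &&
	    (sh->action[SIGCHLD - 1].sa.sa_handler == SIG_IGN ||
	     (sh->action[SIGCHLD - 1].sa.sa_flags & SA_NOCLDWAIT)))
		autoreap = true;
	spin_unlock_irq(&sh->siglock);

	return autoreap;
}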
874 | 877 | ||
875 | #ifdef CONFIG_DEBUG_STACK_USAGE | 878 | #ifdef CONFIG_DEBUG_STACK_USAGE |
876 | static void check_stack_usage(void) | 879 | static void check_stack_usage(void) |
877 | { | 880 | { |
878 | static DEFINE_SPINLOCK(low_water_lock); | 881 | static DEFINE_SPINLOCK(low_water_lock); |
879 | static int lowest_to_date = THREAD_SIZE; | 882 | static int lowest_to_date = THREAD_SIZE; |
880 | unsigned long free; | 883 | unsigned long free; |
881 | 884 | ||
882 | free = stack_not_used(current); | 885 | free = stack_not_used(current); |
883 | 886 | ||
884 | if (free >= lowest_to_date) | 887 | if (free >= lowest_to_date) |
885 | return; | 888 | return; |
886 | 889 | ||
887 | spin_lock(&low_water_lock); | 890 | spin_lock(&low_water_lock); |
888 | if (free < lowest_to_date) { | 891 | if (free < lowest_to_date) { |
889 | printk(KERN_WARNING "%s used greatest stack depth: %lu bytes " | 892 | printk(KERN_WARNING "%s used greatest stack depth: %lu bytes " |
890 | "left\n", | 893 | "left\n", |
891 | current->comm, free); | 894 | current->comm, free); |
892 | lowest_to_date = free; | 895 | lowest_to_date = free; |
893 | } | 896 | } |
894 | spin_unlock(&low_water_lock); | 897 | spin_unlock(&low_water_lock); |
895 | } | 898 | } |
896 | #else | 899 | #else |
897 | static inline void check_stack_usage(void) {} | 900 | static inline void check_stack_usage(void) {} |
898 | #endif | 901 | #endif |
899 | 902 | ||
900 | NORET_TYPE void do_exit(long code) | 903 | NORET_TYPE void do_exit(long code) |
901 | { | 904 | { |
902 | struct task_struct *tsk = current; | 905 | struct task_struct *tsk = current; |
903 | int group_dead; | 906 | int group_dead; |
904 | 907 | ||
905 | profile_task_exit(tsk); | 908 | profile_task_exit(tsk); |
906 | 909 | ||
907 | WARN_ON(atomic_read(&tsk->fs_excl)); | 910 | WARN_ON(atomic_read(&tsk->fs_excl)); |
908 | WARN_ON(blk_needs_flush_plug(tsk)); | 911 | WARN_ON(blk_needs_flush_plug(tsk)); |
909 | 912 | ||
910 | if (unlikely(in_interrupt())) | 913 | if (unlikely(in_interrupt())) |
911 | panic("Aiee, killing interrupt handler!"); | 914 | panic("Aiee, killing interrupt handler!"); |
912 | if (unlikely(!tsk->pid)) | 915 | if (unlikely(!tsk->pid)) |
913 | panic("Attempted to kill the idle task!"); | 916 | panic("Attempted to kill the idle task!"); |
914 | 917 | ||
915 | /* | 918 | /* |
916 | * If do_exit is called because this process oopsed, it's possible | 919 | * If do_exit is called because this process oopsed, it's possible |
917 | * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before | 920 | * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before |
918 | * continuing. Amongst other possible reasons, this is to prevent | 921 | * continuing. Amongst other possible reasons, this is to prevent |
919 | * mm_release()->clear_child_tid() from writing to a user-controlled | 922 | * mm_release()->clear_child_tid() from writing to a user-controlled |
920 | * kernel address. | 923 | * kernel address. |
921 | */ | 924 | */ |
922 | set_fs(USER_DS); | 925 | set_fs(USER_DS); |
923 | 926 | ||
924 | ptrace_event(PTRACE_EVENT_EXIT, code); | 927 | ptrace_event(PTRACE_EVENT_EXIT, code); |
925 | 928 | ||
926 | validate_creds_for_do_exit(tsk); | 929 | validate_creds_for_do_exit(tsk); |
927 | 930 | ||
928 | /* | 931 | /* |
929 | * We're taking recursive faults here in do_exit. Safest is to just | 932 | * We're taking recursive faults here in do_exit. Safest is to just |
930 | * leave this task alone and wait for reboot. | 933 | * leave this task alone and wait for reboot. |
931 | */ | 934 | */ |
932 | if (unlikely(tsk->flags & PF_EXITING)) { | 935 | if (unlikely(tsk->flags & PF_EXITING)) { |
933 | printk(KERN_ALERT | 936 | printk(KERN_ALERT |
934 | "Fixing recursive fault but reboot is needed!\n"); | 937 | "Fixing recursive fault but reboot is needed!\n"); |
935 | /* | 938 | /* |
936 | * We can do this unlocked here. The futex code uses | 939 | * We can do this unlocked here. The futex code uses |
937 | * this flag just to verify whether the pi state | 940 | * this flag just to verify whether the pi state |
938 | * cleanup has been done or not. In the worst case it | 941 | * cleanup has been done or not. In the worst case it |
939 | * loops once more. We pretend that the cleanup was | 942 | * loops once more. We pretend that the cleanup was |
940 | * done as there is no way to return. Either the | 943 | * done as there is no way to return. Either the |
941 | * OWNER_DIED bit is set by now or we push the blocked | 944 | * OWNER_DIED bit is set by now or we push the blocked |
942 | * task into the wait-forever nirvana as well. | 945 | * task into the wait-forever nirvana as well. |
943 | */ | 946 | */ |
944 | tsk->flags |= PF_EXITPIDONE; | 947 | tsk->flags |= PF_EXITPIDONE; |
945 | set_current_state(TASK_UNINTERRUPTIBLE); | 948 | set_current_state(TASK_UNINTERRUPTIBLE); |
946 | schedule(); | 949 | schedule(); |
947 | } | 950 | } |
948 | 951 | ||
949 | exit_irq_thread(); | 952 | exit_irq_thread(); |
950 | 953 | ||
951 | exit_signals(tsk); /* sets PF_EXITING */ | 954 | exit_signals(tsk); /* sets PF_EXITING */ |
952 | /* | 955 | /* |
953 | * tsk->flags are checked in the futex code to protect against | 956 | * tsk->flags are checked in the futex code to protect against |
954 | * an exiting task cleaning up the robust pi futexes. | 957 | * an exiting task cleaning up the robust pi futexes. |
955 | */ | 958 | */ |
956 | smp_mb(); | 959 | smp_mb(); |
957 | raw_spin_unlock_wait(&tsk->pi_lock); | 960 | raw_spin_unlock_wait(&tsk->pi_lock); |
958 | 961 | ||
959 | if (unlikely(in_atomic())) | 962 | if (unlikely(in_atomic())) |
960 | printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", | 963 | printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", |
961 | current->comm, task_pid_nr(current), | 964 | current->comm, task_pid_nr(current), |
962 | preempt_count()); | 965 | preempt_count()); |
963 | 966 | ||
964 | acct_update_integrals(tsk); | 967 | acct_update_integrals(tsk); |
965 | /* sync mm's RSS info before statistics gathering */ | 968 | /* sync mm's RSS info before statistics gathering */ |
966 | if (tsk->mm) | 969 | if (tsk->mm) |
967 | sync_mm_rss(tsk, tsk->mm); | 970 | sync_mm_rss(tsk, tsk->mm); |
968 | group_dead = atomic_dec_and_test(&tsk->signal->live); | 971 | group_dead = atomic_dec_and_test(&tsk->signal->live); |
969 | if (group_dead) { | 972 | if (group_dead) { |
970 | hrtimer_cancel(&tsk->signal->real_timer); | 973 | hrtimer_cancel(&tsk->signal->real_timer); |
971 | exit_itimers(tsk->signal); | 974 | exit_itimers(tsk->signal); |
972 | if (tsk->mm) | 975 | if (tsk->mm) |
973 | setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm); | 976 | setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm); |
974 | } | 977 | } |
975 | acct_collect(code, group_dead); | 978 | acct_collect(code, group_dead); |
976 | if (group_dead) | 979 | if (group_dead) |
977 | tty_audit_exit(); | 980 | tty_audit_exit(); |
978 | if (unlikely(tsk->audit_context)) | 981 | if (unlikely(tsk->audit_context)) |
979 | audit_free(tsk); | 982 | audit_free(tsk); |
980 | 983 | ||
981 | tsk->exit_code = code; | 984 | tsk->exit_code = code; |
982 | taskstats_exit(tsk, group_dead); | 985 | taskstats_exit(tsk, group_dead); |
983 | 986 | ||
984 | exit_mm(tsk); | 987 | exit_mm(tsk); |
985 | 988 | ||
986 | if (group_dead) | 989 | if (group_dead) |
987 | acct_process(); | 990 | acct_process(); |
988 | trace_sched_process_exit(tsk); | 991 | trace_sched_process_exit(tsk); |
989 | 992 | ||
990 | exit_sem(tsk); | 993 | exit_sem(tsk); |
991 | exit_files(tsk); | 994 | exit_files(tsk); |
992 | exit_fs(tsk); | 995 | exit_fs(tsk); |
993 | check_stack_usage(); | 996 | check_stack_usage(); |
994 | exit_thread(); | 997 | exit_thread(); |
995 | 998 | ||
996 | /* | 999 | /* |
997 | * Flush inherited counters to the parent - before the parent | 1000 | * Flush inherited counters to the parent - before the parent |
998 | * gets woken up by child-exit notifications. | 1001 | * gets woken up by child-exit notifications. |
999 | * | 1002 | * |
1000 | * because of cgroup mode, must be called before cgroup_exit() | 1003 | * because of cgroup mode, must be called before cgroup_exit() |
1001 | */ | 1004 | */ |
1002 | perf_event_exit_task(tsk); | 1005 | perf_event_exit_task(tsk); |
1003 | 1006 | ||
1004 | cgroup_exit(tsk, 1); | 1007 | cgroup_exit(tsk, 1); |
1005 | 1008 | ||
1006 | if (group_dead) | 1009 | if (group_dead) |
1007 | disassociate_ctty(1); | 1010 | disassociate_ctty(1); |
1008 | 1011 | ||
1009 | module_put(task_thread_info(tsk)->exec_domain->module); | 1012 | module_put(task_thread_info(tsk)->exec_domain->module); |
1010 | 1013 | ||
1011 | proc_exit_connector(tsk); | 1014 | proc_exit_connector(tsk); |
1012 | 1015 | ||
1013 | /* | 1016 | /* |
1014 | * FIXME: do that only when needed, using sched_exit tracepoint | 1017 | * FIXME: do that only when needed, using sched_exit tracepoint |
1015 | */ | 1018 | */ |
1016 | ptrace_put_breakpoints(tsk); | 1019 | ptrace_put_breakpoints(tsk); |
1017 | 1020 | ||
1018 | exit_notify(tsk, group_dead); | 1021 | exit_notify(tsk, group_dead); |
1019 | #ifdef CONFIG_NUMA | 1022 | #ifdef CONFIG_NUMA |
1020 | task_lock(tsk); | 1023 | task_lock(tsk); |
1021 | mpol_put(tsk->mempolicy); | 1024 | mpol_put(tsk->mempolicy); |
1022 | tsk->mempolicy = NULL; | 1025 | tsk->mempolicy = NULL; |
1023 | task_unlock(tsk); | 1026 | task_unlock(tsk); |
1024 | #endif | 1027 | #endif |
1025 | #ifdef CONFIG_FUTEX | 1028 | #ifdef CONFIG_FUTEX |
1026 | if (unlikely(current->pi_state_cache)) | 1029 | if (unlikely(current->pi_state_cache)) |
1027 | kfree(current->pi_state_cache); | 1030 | kfree(current->pi_state_cache); |
1028 | #endif | 1031 | #endif |
1029 | /* | 1032 | /* |
1030 | * Make sure we are holding no locks: | 1033 | * Make sure we are holding no locks: |
1031 | */ | 1034 | */ |
1032 | debug_check_no_locks_held(tsk); | 1035 | debug_check_no_locks_held(tsk); |
1033 | /* | 1036 | /* |
1034 | * We can do this unlocked here. The futex code uses this flag | 1037 | * We can do this unlocked here. The futex code uses this flag |
1035 | * just to verify whether the pi state cleanup has been done | 1038 | * just to verify whether the pi state cleanup has been done |
1036 | * or not. In the worst case it loops once more. | 1039 | * or not. In the worst case it loops once more. |
1037 | */ | 1040 | */ |
1038 | tsk->flags |= PF_EXITPIDONE; | 1041 | tsk->flags |= PF_EXITPIDONE; |
1039 | 1042 | ||
1040 | if (tsk->io_context) | 1043 | if (tsk->io_context) |
1041 | exit_io_context(tsk); | 1044 | exit_io_context(tsk); |
1042 | 1045 | ||
1043 | if (tsk->splice_pipe) | 1046 | if (tsk->splice_pipe) |
1044 | __free_pipe_info(tsk->splice_pipe); | 1047 | __free_pipe_info(tsk->splice_pipe); |
1045 | 1048 | ||
1046 | validate_creds_for_do_exit(tsk); | 1049 | validate_creds_for_do_exit(tsk); |
1047 | 1050 | ||
1048 | preempt_disable(); | 1051 | preempt_disable(); |
1049 | exit_rcu(); | 1052 | exit_rcu(); |
1050 | /* causes final put_task_struct in finish_task_switch(). */ | 1053 | /* causes final put_task_struct in finish_task_switch(). */ |
1051 | tsk->state = TASK_DEAD; | 1054 | tsk->state = TASK_DEAD; |
1052 | schedule(); | 1055 | schedule(); |
1053 | BUG(); | 1056 | BUG(); |
1054 | /* Avoid "noreturn function does return". */ | 1057 | /* Avoid "noreturn function does return". */ |
1055 | for (;;) | 1058 | for (;;) |
1056 | cpu_relax(); /* For when BUG is null */ | 1059 | cpu_relax(); /* For when BUG is null */ |
1057 | } | 1060 | } |
1058 | 1061 | ||
1059 | EXPORT_SYMBOL_GPL(do_exit); | 1062 | EXPORT_SYMBOL_GPL(do_exit); |
1060 | 1063 | ||
1061 | NORET_TYPE void complete_and_exit(struct completion *comp, long code) | 1064 | NORET_TYPE void complete_and_exit(struct completion *comp, long code) |
1062 | { | 1065 | { |
1063 | if (comp) | 1066 | if (comp) |
1064 | complete(comp); | 1067 | complete(comp); |
1065 | 1068 | ||
1066 | do_exit(code); | 1069 | do_exit(code); |
1067 | } | 1070 | } |
1068 | 1071 | ||
1069 | EXPORT_SYMBOL(complete_and_exit); | 1072 | EXPORT_SYMBOL(complete_and_exit); |
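complete_and_exit() exists so a module's thread can guarantee it has fully left the module's text before its creator proceeds (typically before module unload). A minimal usage sketch (names illustrative):

#include <linux/completion.h>
#include <linux/sched.h>

static DECLARE_COMPLETION(worker_done);

static int worker(void *unused)
{
	/* ... do the actual work ... */
	complete_and_exit(&worker_done, 0);	/* never returns */
}

/* The creator blocks until the thread is really gone:
 *	wait_for_completion(&worker_done);
 */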
1070 | 1073 | ||
1071 | SYSCALL_DEFINE1(exit, int, error_code) | 1074 | SYSCALL_DEFINE1(exit, int, error_code) |
1072 | { | 1075 | { |
1073 | do_exit((error_code&0xff)<<8); | 1076 | do_exit((error_code&0xff)<<8); |
1074 | } | 1077 | } |
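The (error_code & 0xff) << 8 packing above is exactly what wait(2) callers decode with WIFEXITED()/WEXITSTATUS(): the exit status sits in bits 8-15, and the low 7 bits, which would hold a terminating signal, stay zero. A userspace check:

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0)
		_exit(42);		/* kernel stores (42 & 0xff) << 8 */

	waitpid(pid, &status, 0);
	if (WIFEXITED(status))		/* low 7 bits zero: normal exit */
		printf("exit status %d (raw 0x%x)\n",
		       WEXITSTATUS(status), status);	/* 42, 0x2a00 */
	return 0;
}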
1075 | 1078 | ||
1076 | /* | 1079 | /* |
1077 | * Take down every thread in the group. This is called by fatal signals | 1080 | * Take down every thread in the group. This is called by fatal signals |
1078 | * as well as by sys_exit_group (below). | 1081 | * as well as by sys_exit_group (below). |
1079 | */ | 1082 | */ |
1080 | NORET_TYPE void | 1083 | NORET_TYPE void |
1081 | do_group_exit(int exit_code) | 1084 | do_group_exit(int exit_code) |
1082 | { | 1085 | { |
1083 | struct signal_struct *sig = current->signal; | 1086 | struct signal_struct *sig = current->signal; |
1084 | 1087 | ||
1085 | BUG_ON(exit_code & 0x80); /* core dumps don't get here */ | 1088 | BUG_ON(exit_code & 0x80); /* core dumps don't get here */ |
1086 | 1089 | ||
1087 | if (signal_group_exit(sig)) | 1090 | if (signal_group_exit(sig)) |
1088 | exit_code = sig->group_exit_code; | 1091 | exit_code = sig->group_exit_code; |
1089 | else if (!thread_group_empty(current)) { | 1092 | else if (!thread_group_empty(current)) { |
1090 | struct sighand_struct *const sighand = current->sighand; | 1093 | struct sighand_struct *const sighand = current->sighand; |
1091 | spin_lock_irq(&sighand->siglock); | 1094 | spin_lock_irq(&sighand->siglock); |
1092 | if (signal_group_exit(sig)) | 1095 | if (signal_group_exit(sig)) |
1093 | /* Another thread got here before we took the lock. */ | 1096 | /* Another thread got here before we took the lock. */ |
1094 | exit_code = sig->group_exit_code; | 1097 | exit_code = sig->group_exit_code; |
1095 | else { | 1098 | else { |
1096 | sig->group_exit_code = exit_code; | 1099 | sig->group_exit_code = exit_code; |
1097 | sig->flags = SIGNAL_GROUP_EXIT; | 1100 | sig->flags = SIGNAL_GROUP_EXIT; |
1098 | zap_other_threads(current); | 1101 | zap_other_threads(current); |
1099 | } | 1102 | } |
1100 | spin_unlock_irq(&sighand->siglock); | 1103 | spin_unlock_irq(&sighand->siglock); |
1101 | } | 1104 | } |
1102 | 1105 | ||
1103 | do_exit(exit_code); | 1106 | do_exit(exit_code); |
1104 | /* NOTREACHED */ | 1107 | /* NOTREACHED */ |
1105 | } | 1108 | } |
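Note the shape of do_group_exit(): an unlocked signal_group_exit() fast path, then the same test again under siglock so that exactly one thread publishes the group exit code. The same check/lock/re-check idiom in portable C (sketch; the unlocked read is the same benign race the kernel tolerates):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool group_exiting;
static int group_exit_code;

/* Returns the code every thread in the group should exit with. */
static int settle_exit_code(int my_code)
{
	if (group_exiting)		/* unlocked fast path */
		return group_exit_code;

	pthread_mutex_lock(&lock);
	if (group_exiting) {		/* re-check: someone beat us here */
		my_code = group_exit_code;
	} else {
		group_exit_code = my_code;	/* we won: publish ours */
		group_exiting = true;
	}
	pthread_mutex_unlock(&lock);
	return my_code;
}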
1106 | 1109 | ||
1107 | /* | 1110 | /* |
1108 | * this kills every thread in the thread group. Note that any externally | 1111 | * this kills every thread in the thread group. Note that any externally |
1109 | * wait4()-ing process will get the correct exit code - even if this | 1112 | * wait4()-ing process will get the correct exit code - even if this |
1110 | * thread is not the thread group leader. | 1113 | * thread is not the thread group leader. |
1111 | */ | 1114 | */ |
1112 | SYSCALL_DEFINE1(exit_group, int, error_code) | 1115 | SYSCALL_DEFINE1(exit_group, int, error_code) |
1113 | { | 1116 | { |
1114 | do_group_exit((error_code & 0xff) << 8); | 1117 | do_group_exit((error_code & 0xff) << 8); |
1115 | /* NOTREACHED */ | 1118 | /* NOTREACHED */ |
1116 | return 0; | 1119 | return 0; |
1117 | } | 1120 | } |
1118 | 1121 | ||
1119 | struct wait_opts { | 1122 | struct wait_opts { |
1120 | enum pid_type wo_type; | 1123 | enum pid_type wo_type; |
1121 | int wo_flags; | 1124 | int wo_flags; |
1122 | struct pid *wo_pid; | 1125 | struct pid *wo_pid; |
1123 | 1126 | ||
1124 | struct siginfo __user *wo_info; | 1127 | struct siginfo __user *wo_info; |
1125 | int __user *wo_stat; | 1128 | int __user *wo_stat; |
1126 | struct rusage __user *wo_rusage; | 1129 | struct rusage __user *wo_rusage; |
1127 | 1130 | ||
1128 | wait_queue_t child_wait; | 1131 | wait_queue_t child_wait; |
1129 | int notask_error; | 1132 | int notask_error; |
1130 | }; | 1133 | }; |
1131 | 1134 | ||
1132 | static inline | 1135 | static inline |
1133 | struct pid *task_pid_type(struct task_struct *task, enum pid_type type) | 1136 | struct pid *task_pid_type(struct task_struct *task, enum pid_type type) |
1134 | { | 1137 | { |
1135 | if (type != PIDTYPE_PID) | 1138 | if (type != PIDTYPE_PID) |
1136 | task = task->group_leader; | 1139 | task = task->group_leader; |
1137 | return task->pids[type].pid; | 1140 | return task->pids[type].pid; |
1138 | } | 1141 | } |
1139 | 1142 | ||
1140 | static int eligible_pid(struct wait_opts *wo, struct task_struct *p) | 1143 | static int eligible_pid(struct wait_opts *wo, struct task_struct *p) |
1141 | { | 1144 | { |
1142 | return wo->wo_type == PIDTYPE_MAX || | 1145 | return wo->wo_type == PIDTYPE_MAX || |
1143 | task_pid_type(p, wo->wo_type) == wo->wo_pid; | 1146 | task_pid_type(p, wo->wo_type) == wo->wo_pid; |
1144 | } | 1147 | } |
1145 | 1148 | ||
1146 | static int eligible_child(struct wait_opts *wo, struct task_struct *p) | 1149 | static int eligible_child(struct wait_opts *wo, struct task_struct *p) |
1147 | { | 1150 | { |
1148 | if (!eligible_pid(wo, p)) | 1151 | if (!eligible_pid(wo, p)) |
1149 | return 0; | 1152 | return 0; |
1150 | /* Wait for all children (clone and not) if __WALL is set; | 1153 | /* Wait for all children (clone and not) if __WALL is set; |
1151 | * otherwise, wait for clone children *only* if __WCLONE is | 1154 | * otherwise, wait for clone children *only* if __WCLONE is |
1152 | * set; otherwise, wait for non-clone children *only*. (Note: | 1155 | * set; otherwise, wait for non-clone children *only*. (Note: |
1153 | * A "clone" child here is one that reports to its parent | 1156 | * A "clone" child here is one that reports to its parent |
1154 | * using a signal other than SIGCHLD.) */ | 1157 | * using a signal other than SIGCHLD.) */ |
1155 | if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE)) | 1158 | if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE)) |
1156 | && !(wo->wo_flags & __WALL)) | 1159 | && !(wo->wo_flags & __WALL)) |
1157 | return 0; | 1160 | return 0; |
1158 | 1161 | ||
1159 | return 1; | 1162 | return 1; |
1160 | } | 1163 | } |
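The XOR in eligible_child() is compact; spelled out, it is a three-way filter. An equivalent form (hypothetical helper, kernel-side fragment for illustration only):

#include <linux/signal.h>
#include <linux/wait.h>

static int wants_this_child(int wo_flags, int exit_signal)
{
	int is_clone = (exit_signal != SIGCHLD);	/* non-SIGCHLD reporter */

	if (wo_flags & __WALL)
		return 1;		/* wait for everything */
	if (wo_flags & __WCLONE)
		return is_clone;	/* clone children only */
	return !is_clone;		/* non-clone children only */
}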
1161 | 1164 | ||
1162 | static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p, | 1165 | static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p, |
1163 | pid_t pid, uid_t uid, int why, int status) | 1166 | pid_t pid, uid_t uid, int why, int status) |
1164 | { | 1167 | { |
1165 | struct siginfo __user *infop; | 1168 | struct siginfo __user *infop; |
1166 | int retval = wo->wo_rusage | 1169 | int retval = wo->wo_rusage |
1167 | ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; | 1170 | ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; |
1168 | 1171 | ||
1169 | put_task_struct(p); | 1172 | put_task_struct(p); |
1170 | infop = wo->wo_info; | 1173 | infop = wo->wo_info; |
1171 | if (infop) { | 1174 | if (infop) { |
1172 | if (!retval) | 1175 | if (!retval) |
1173 | retval = put_user(SIGCHLD, &infop->si_signo); | 1176 | retval = put_user(SIGCHLD, &infop->si_signo); |
1174 | if (!retval) | 1177 | if (!retval) |
1175 | retval = put_user(0, &infop->si_errno); | 1178 | retval = put_user(0, &infop->si_errno); |
1176 | if (!retval) | 1179 | if (!retval) |
1177 | retval = put_user((short)why, &infop->si_code); | 1180 | retval = put_user((short)why, &infop->si_code); |
1178 | if (!retval) | 1181 | if (!retval) |
1179 | retval = put_user(pid, &infop->si_pid); | 1182 | retval = put_user(pid, &infop->si_pid); |
1180 | if (!retval) | 1183 | if (!retval) |
1181 | retval = put_user(uid, &infop->si_uid); | 1184 | retval = put_user(uid, &infop->si_uid); |
1182 | if (!retval) | 1185 | if (!retval) |
1183 | retval = put_user(status, &infop->si_status); | 1186 | retval = put_user(status, &infop->si_status); |
1184 | } | 1187 | } |
1185 | if (!retval) | 1188 | if (!retval) |
1186 | retval = pid; | 1189 | retval = pid; |
1187 | return retval; | 1190 | return retval; |
1188 | } | 1191 | } |
1189 | 1192 | ||
1190 | /* | 1193 | /* |
1191 | * Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold | 1194 | * Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold |
1192 | * read_lock(&tasklist_lock) on entry. If we return zero, we still hold | 1195 | * read_lock(&tasklist_lock) on entry. If we return zero, we still hold |
1193 | * the lock and this task is uninteresting. If we return nonzero, we have | 1196 | * the lock and this task is uninteresting. If we return nonzero, we have |
1194 | * released the lock and the system call should return. | 1197 | * released the lock and the system call should return. |
1195 | */ | 1198 | */ |
1196 | static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) | 1199 | static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) |
1197 | { | 1200 | { |
1198 | unsigned long state; | 1201 | unsigned long state; |
1199 | int retval, status, traced; | 1202 | int retval, status, traced; |
1200 | pid_t pid = task_pid_vnr(p); | 1203 | pid_t pid = task_pid_vnr(p); |
1201 | uid_t uid = __task_cred(p)->uid; | 1204 | uid_t uid = __task_cred(p)->uid; |
1202 | struct siginfo __user *infop; | 1205 | struct siginfo __user *infop; |
1203 | 1206 | ||
1204 | if (!likely(wo->wo_flags & WEXITED)) | 1207 | if (!likely(wo->wo_flags & WEXITED)) |
1205 | return 0; | 1208 | return 0; |
1206 | 1209 | ||
1207 | if (unlikely(wo->wo_flags & WNOWAIT)) { | 1210 | if (unlikely(wo->wo_flags & WNOWAIT)) { |
1208 | int exit_code = p->exit_code; | 1211 | int exit_code = p->exit_code; |
1209 | int why; | 1212 | int why; |
1210 | 1213 | ||
1211 | get_task_struct(p); | 1214 | get_task_struct(p); |
1212 | read_unlock(&tasklist_lock); | 1215 | read_unlock(&tasklist_lock); |
1213 | if ((exit_code & 0x7f) == 0) { | 1216 | if ((exit_code & 0x7f) == 0) { |
1214 | why = CLD_EXITED; | 1217 | why = CLD_EXITED; |
1215 | status = exit_code >> 8; | 1218 | status = exit_code >> 8; |
1216 | } else { | 1219 | } else { |
1217 | why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED; | 1220 | why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED; |
1218 | status = exit_code & 0x7f; | 1221 | status = exit_code & 0x7f; |
1219 | } | 1222 | } |
1220 | return wait_noreap_copyout(wo, p, pid, uid, why, status); | 1223 | return wait_noreap_copyout(wo, p, pid, uid, why, status); |
1221 | } | 1224 | } |
1222 | 1225 | ||
1223 | /* | 1226 | /* |
1224 | * Try to move the task's state to DEAD | 1227 | * Try to move the task's state to DEAD |
1225 | * only one thread is allowed to do this: | 1228 | * only one thread is allowed to do this: |
1226 | */ | 1229 | */ |
1227 | state = xchg(&p->exit_state, EXIT_DEAD); | 1230 | state = xchg(&p->exit_state, EXIT_DEAD); |
1228 | if (state != EXIT_ZOMBIE) { | 1231 | if (state != EXIT_ZOMBIE) { |
1229 | BUG_ON(state != EXIT_DEAD); | 1232 | BUG_ON(state != EXIT_DEAD); |
1230 | return 0; | 1233 | return 0; |
1231 | } | 1234 | } |
1232 | 1235 | ||
1233 | traced = ptrace_reparented(p); | 1236 | traced = ptrace_reparented(p); |
1234 | /* | 1237 | /* |
1235 | * It can be ptraced but not reparented, check | 1238 | * It can be ptraced but not reparented, check |
1236 | * !task_detached() to filter out sub-threads. | 1239 | * !task_detached() to filter out sub-threads. |
1237 | */ | 1240 | */ |
1238 | if (likely(!traced) && likely(!task_detached(p))) { | 1241 | if (likely(!traced) && likely(!task_detached(p))) { |
1239 | struct signal_struct *psig; | 1242 | struct signal_struct *psig; |
1240 | struct signal_struct *sig; | 1243 | struct signal_struct *sig; |
1241 | unsigned long maxrss; | 1244 | unsigned long maxrss; |
1242 | cputime_t tgutime, tgstime; | 1245 | cputime_t tgutime, tgstime; |
1243 | 1246 | ||
1244 | /* | 1247 | /* |
1245 | * The resource counters for the group leader are in its | 1248 | * The resource counters for the group leader are in its |
1246 | * own task_struct. Those for dead threads in the group | 1249 | * own task_struct. Those for dead threads in the group |
1247 | * are in its signal_struct, as are those for the child | 1250 | * are in its signal_struct, as are those for the child |
1248 | * processes it has previously reaped. All these | 1251 | * processes it has previously reaped. All these |
1249 | * accumulate in the parent's signal_struct c* fields. | 1252 | * accumulate in the parent's signal_struct c* fields. |
1250 | * | 1253 | * |
1251 | * We don't bother to take a lock here to protect these | 1254 | * We don't bother to take a lock here to protect these |
1252 | * p->signal fields, because they are only touched by | 1255 | * p->signal fields, because they are only touched by |
1253 | * __exit_signal, which runs with tasklist_lock | 1256 | * __exit_signal, which runs with tasklist_lock |
1254 | * write-locked anyway, and so is excluded here. We do | 1257 | * write-locked anyway, and so is excluded here. We do |
1255 | * need to protect the access to parent->signal fields, | 1258 | * need to protect the access to parent->signal fields, |
1256 | * as other threads in the parent group can be right | 1259 | * as other threads in the parent group can be right |
1257 | * here reaping other children at the same time. | 1260 | * here reaping other children at the same time. |
1258 | * | 1261 | * |
1259 | * We use thread_group_times() to get times for the thread | 1262 | * We use thread_group_times() to get times for the thread |
1260 | * group, which consolidates times for all threads in the | 1263 | * group, which consolidates times for all threads in the |
1261 | * group including the group leader. | 1264 | * group including the group leader. |
1262 | */ | 1265 | */ |
1263 | thread_group_times(p, &tgutime, &tgstime); | 1266 | thread_group_times(p, &tgutime, &tgstime); |
1264 | spin_lock_irq(&p->real_parent->sighand->siglock); | 1267 | spin_lock_irq(&p->real_parent->sighand->siglock); |
1265 | psig = p->real_parent->signal; | 1268 | psig = p->real_parent->signal; |
1266 | sig = p->signal; | 1269 | sig = p->signal; |
1267 | psig->cutime = | 1270 | psig->cutime = |
1268 | cputime_add(psig->cutime, | 1271 | cputime_add(psig->cutime, |
1269 | cputime_add(tgutime, | 1272 | cputime_add(tgutime, |
1270 | sig->cutime)); | 1273 | sig->cutime)); |
1271 | psig->cstime = | 1274 | psig->cstime = |
1272 | cputime_add(psig->cstime, | 1275 | cputime_add(psig->cstime, |
1273 | cputime_add(tgstime, | 1276 | cputime_add(tgstime, |
1274 | sig->cstime)); | 1277 | sig->cstime)); |
1275 | psig->cgtime = | 1278 | psig->cgtime = |
1276 | cputime_add(psig->cgtime, | 1279 | cputime_add(psig->cgtime, |
1277 | cputime_add(p->gtime, | 1280 | cputime_add(p->gtime, |
1278 | cputime_add(sig->gtime, | 1281 | cputime_add(sig->gtime, |
1279 | sig->cgtime))); | 1282 | sig->cgtime))); |
1280 | psig->cmin_flt += | 1283 | psig->cmin_flt += |
1281 | p->min_flt + sig->min_flt + sig->cmin_flt; | 1284 | p->min_flt + sig->min_flt + sig->cmin_flt; |
1282 | psig->cmaj_flt += | 1285 | psig->cmaj_flt += |
1283 | p->maj_flt + sig->maj_flt + sig->cmaj_flt; | 1286 | p->maj_flt + sig->maj_flt + sig->cmaj_flt; |
1284 | psig->cnvcsw += | 1287 | psig->cnvcsw += |
1285 | p->nvcsw + sig->nvcsw + sig->cnvcsw; | 1288 | p->nvcsw + sig->nvcsw + sig->cnvcsw; |
1286 | psig->cnivcsw += | 1289 | psig->cnivcsw += |
1287 | p->nivcsw + sig->nivcsw + sig->cnivcsw; | 1290 | p->nivcsw + sig->nivcsw + sig->cnivcsw; |
1288 | psig->cinblock += | 1291 | psig->cinblock += |
1289 | task_io_get_inblock(p) + | 1292 | task_io_get_inblock(p) + |
1290 | sig->inblock + sig->cinblock; | 1293 | sig->inblock + sig->cinblock; |
1291 | psig->coublock += | 1294 | psig->coublock += |
1292 | task_io_get_oublock(p) + | 1295 | task_io_get_oublock(p) + |
1293 | sig->oublock + sig->coublock; | 1296 | sig->oublock + sig->coublock; |
1294 | maxrss = max(sig->maxrss, sig->cmaxrss); | 1297 | maxrss = max(sig->maxrss, sig->cmaxrss); |
1295 | if (psig->cmaxrss < maxrss) | 1298 | if (psig->cmaxrss < maxrss) |
1296 | psig->cmaxrss = maxrss; | 1299 | psig->cmaxrss = maxrss; |
1297 | task_io_accounting_add(&psig->ioac, &p->ioac); | 1300 | task_io_accounting_add(&psig->ioac, &p->ioac); |
1298 | task_io_accounting_add(&psig->ioac, &sig->ioac); | 1301 | task_io_accounting_add(&psig->ioac, &sig->ioac); |
1299 | spin_unlock_irq(&p->real_parent->sighand->siglock); | 1302 | spin_unlock_irq(&p->real_parent->sighand->siglock); |
1300 | } | 1303 | } |
1301 | 1304 | ||
1302 | /* | 1305 | /* |
1303 | * Now we are sure this task is interesting, and no other | 1306 | * Now we are sure this task is interesting, and no other |
1304 | * thread can reap it because we set its state to EXIT_DEAD. | 1307 | * thread can reap it because we set its state to EXIT_DEAD. |
1305 | */ | 1308 | */ |
1306 | read_unlock(&tasklist_lock); | 1309 | read_unlock(&tasklist_lock); |
1307 | 1310 | ||
1308 | retval = wo->wo_rusage | 1311 | retval = wo->wo_rusage |
1309 | ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; | 1312 | ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; |
1310 | status = (p->signal->flags & SIGNAL_GROUP_EXIT) | 1313 | status = (p->signal->flags & SIGNAL_GROUP_EXIT) |
1311 | ? p->signal->group_exit_code : p->exit_code; | 1314 | ? p->signal->group_exit_code : p->exit_code; |
1312 | if (!retval && wo->wo_stat) | 1315 | if (!retval && wo->wo_stat) |
1313 | retval = put_user(status, wo->wo_stat); | 1316 | retval = put_user(status, wo->wo_stat); |
1314 | 1317 | ||
1315 | infop = wo->wo_info; | 1318 | infop = wo->wo_info; |
1316 | if (!retval && infop) | 1319 | if (!retval && infop) |
1317 | retval = put_user(SIGCHLD, &infop->si_signo); | 1320 | retval = put_user(SIGCHLD, &infop->si_signo); |
1318 | if (!retval && infop) | 1321 | if (!retval && infop) |
1319 | retval = put_user(0, &infop->si_errno); | 1322 | retval = put_user(0, &infop->si_errno); |
1320 | if (!retval && infop) { | 1323 | if (!retval && infop) { |
1321 | int why; | 1324 | int why; |
1322 | 1325 | ||
1323 | if ((status & 0x7f) == 0) { | 1326 | if ((status & 0x7f) == 0) { |
1324 | why = CLD_EXITED; | 1327 | why = CLD_EXITED; |
1325 | status >>= 8; | 1328 | status >>= 8; |
1326 | } else { | 1329 | } else { |
1327 | why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED; | 1330 | why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED; |
1328 | status &= 0x7f; | 1331 | status &= 0x7f; |
1329 | } | 1332 | } |
1330 | retval = put_user((short)why, &infop->si_code); | 1333 | retval = put_user((short)why, &infop->si_code); |
1331 | if (!retval) | 1334 | if (!retval) |
1332 | retval = put_user(status, &infop->si_status); | 1335 | retval = put_user(status, &infop->si_status); |
1333 | } | 1336 | } |
1334 | if (!retval && infop) | 1337 | if (!retval && infop) |
1335 | retval = put_user(pid, &infop->si_pid); | 1338 | retval = put_user(pid, &infop->si_pid); |
1336 | if (!retval && infop) | 1339 | if (!retval && infop) |
1337 | retval = put_user(uid, &infop->si_uid); | 1340 | retval = put_user(uid, &infop->si_uid); |
1338 | if (!retval) | 1341 | if (!retval) |
1339 | retval = pid; | 1342 | retval = pid; |
1340 | 1343 | ||
1341 | if (traced) { | 1344 | if (traced) { |
1342 | write_lock_irq(&tasklist_lock); | 1345 | write_lock_irq(&tasklist_lock); |
1343 | /* We dropped tasklist, ptracer could die and untrace */ | 1346 | /* We dropped tasklist, ptracer could die and untrace */ |
1344 | ptrace_unlink(p); | 1347 | ptrace_unlink(p); |
1345 | /* | 1348 | /* |
1346 | * If this is not a detached task, notify the parent. | 1349 | * If this is not a detached task, notify the parent. |
1347 | * If it's still not detached after that, don't release | 1350 | * If it's still not detached after that, don't release |
1348 | * it now. | 1351 | * it now. |
1349 | */ | 1352 | */ |
1350 | if (!task_detached(p)) { | 1353 | if (!task_detached(p)) { |
1351 | do_notify_parent(p, p->exit_signal); | 1354 | do_notify_parent(p, p->exit_signal); |
1352 | if (!task_detached(p)) { | 1355 | if (!task_detached(p)) { |
1353 | p->exit_state = EXIT_ZOMBIE; | 1356 | p->exit_state = EXIT_ZOMBIE; |
1354 | p = NULL; | 1357 | p = NULL; |
1355 | } | 1358 | } |
1356 | } | 1359 | } |
1357 | write_unlock_irq(&tasklist_lock); | 1360 | write_unlock_irq(&tasklist_lock); |
1358 | } | 1361 | } |
1359 | if (p != NULL) | 1362 | if (p != NULL) |
1360 | release_task(p); | 1363 | release_task(p); |
1361 | 1364 | ||
1362 | return retval; | 1365 | return retval; |
1363 | } | 1366 | } |
1364 | 1367 | ||
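For reference, the WNOWAIT branch of wait_task_zombie() decodes the raw status word by hand: (exit_code & 0x7f) == 0 means a normal exit with the code in bits 8-15, a set 0x80 bit marks a core dump, and otherwise the low 7 bits name the killing signal. The libc wait-status macros test exactly these bits. A minimal userspace sketch, not part of this patch (WCOREDUMP() is a common but non-POSIX extension):

    #include <stdio.h>
    #include <signal.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();
        if (pid == 0) {
            raise(SIGKILL);         /* die by signal rather than exiting */
            _exit(0);
        }

        int status;
        if (waitpid(pid, &status, 0) < 0) {
            perror("waitpid");
            return 1;
        }

        if (WIFEXITED(status))          /* (status & 0x7f) == 0 */
            printf("exited, code %d\n", WEXITSTATUS(status));
        else if (WIFSIGNALED(status))   /* low 7 bits are the signal */
            printf("killed by signal %d%s\n", WTERMSIG(status),
                   WCOREDUMP(status) ? " (core dumped)" : "");
        return 0;
    }
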
1365 | static int *task_stopped_code(struct task_struct *p, bool ptrace) | 1368 | static int *task_stopped_code(struct task_struct *p, bool ptrace) |
1366 | { | 1369 | { |
1367 | if (ptrace) { | 1370 | if (ptrace) { |
1368 | if (task_is_stopped_or_traced(p) && | 1371 | if (task_is_stopped_or_traced(p) && |
1369 | !(p->jobctl & JOBCTL_LISTENING)) | 1372 | !(p->jobctl & JOBCTL_LISTENING)) |
1370 | return &p->exit_code; | 1373 | return &p->exit_code; |
1371 | } else { | 1374 | } else { |
1372 | if (p->signal->flags & SIGNAL_STOP_STOPPED) | 1375 | if (p->signal->flags & SIGNAL_STOP_STOPPED) |
1373 | return &p->signal->group_exit_code; | 1376 | return &p->signal->group_exit_code; |
1374 | } | 1377 | } |
1375 | return NULL; | 1378 | return NULL; |
1376 | } | 1379 | } |
1377 | 1380 | ||
1378 | /** | 1381 | /** |
1379 | * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED | 1382 | * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED |
1380 | * @wo: wait options | 1383 | * @wo: wait options |
1381 | * @ptrace: is the wait for ptrace | 1384 | * @ptrace: is the wait for ptrace |
1382 | * @p: task to wait for | 1385 | * @p: task to wait for |
1383 | * | 1386 | * |
1384 | * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED. | 1387 | * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED. |
1385 | * | 1388 | * |
1386 | * CONTEXT: | 1389 | * CONTEXT: |
1387 | * read_lock(&tasklist_lock), which is released if return value is | 1390 | * read_lock(&tasklist_lock), which is released if return value is |
1388 | * non-zero. Also, grabs and releases @p->sighand->siglock. | 1391 | * non-zero. Also, grabs and releases @p->sighand->siglock. |
1389 | * | 1392 | * |
1390 | * RETURNS: | 1393 | * RETURNS: |
1391 | * 0 if wait condition didn't exist and search for other wait conditions | 1394 | * 0 if wait condition didn't exist and search for other wait conditions |
1392 | * should continue. Non-zero return, -errno on failure and @p's pid on | 1395 | * should continue. Non-zero return, -errno on failure and @p's pid on |
1393 | * success, implies that tasklist_lock is released and wait condition | 1396 | * success, implies that tasklist_lock is released and wait condition |
1394 | * search should terminate. | 1397 | * search should terminate. |
1395 | */ | 1398 | */ |
1396 | static int wait_task_stopped(struct wait_opts *wo, | 1399 | static int wait_task_stopped(struct wait_opts *wo, |
1397 | int ptrace, struct task_struct *p) | 1400 | int ptrace, struct task_struct *p) |
1398 | { | 1401 | { |
1399 | struct siginfo __user *infop; | 1402 | struct siginfo __user *infop; |
1400 | int retval, exit_code, *p_code, why; | 1403 | int retval, exit_code, *p_code, why; |
1401 | uid_t uid = 0; /* unneeded, required by compiler */ | 1404 | uid_t uid = 0; /* unneeded, required by compiler */ |
1402 | pid_t pid; | 1405 | pid_t pid; |
1403 | 1406 | ||
1404 | /* | 1407 | /* |
1405 | * Traditionally we see ptrace'd stopped tasks regardless of options. | 1408 | * Traditionally we see ptrace'd stopped tasks regardless of options. |
1406 | */ | 1409 | */ |
1407 | if (!ptrace && !(wo->wo_flags & WUNTRACED)) | 1410 | if (!ptrace && !(wo->wo_flags & WUNTRACED)) |
1408 | return 0; | 1411 | return 0; |
1409 | 1412 | ||
1410 | if (!task_stopped_code(p, ptrace)) | 1413 | if (!task_stopped_code(p, ptrace)) |
1411 | return 0; | 1414 | return 0; |
1412 | 1415 | ||
1413 | exit_code = 0; | 1416 | exit_code = 0; |
1414 | spin_lock_irq(&p->sighand->siglock); | 1417 | spin_lock_irq(&p->sighand->siglock); |
1415 | 1418 | ||
1416 | p_code = task_stopped_code(p, ptrace); | 1419 | p_code = task_stopped_code(p, ptrace); |
1417 | if (unlikely(!p_code)) | 1420 | if (unlikely(!p_code)) |
1418 | goto unlock_sig; | 1421 | goto unlock_sig; |
1419 | 1422 | ||
1420 | exit_code = *p_code; | 1423 | exit_code = *p_code; |
1421 | if (!exit_code) | 1424 | if (!exit_code) |
1422 | goto unlock_sig; | 1425 | goto unlock_sig; |
1423 | 1426 | ||
1424 | if (!unlikely(wo->wo_flags & WNOWAIT)) | 1427 | if (!unlikely(wo->wo_flags & WNOWAIT)) |
1425 | *p_code = 0; | 1428 | *p_code = 0; |
1426 | 1429 | ||
1427 | uid = task_uid(p); | 1430 | uid = task_uid(p); |
1428 | unlock_sig: | 1431 | unlock_sig: |
1429 | spin_unlock_irq(&p->sighand->siglock); | 1432 | spin_unlock_irq(&p->sighand->siglock); |
1430 | if (!exit_code) | 1433 | if (!exit_code) |
1431 | return 0; | 1434 | return 0; |
1432 | 1435 | ||
1433 | /* | 1436 | /* |
1434 | * Now we are pretty sure this task is interesting. | 1437 | * Now we are pretty sure this task is interesting. |
1435 | * Make sure it doesn't get reaped out from under us while we | 1438 | * Make sure it doesn't get reaped out from under us while we |
1436 | * give up the lock and then examine it below. We don't want to | 1439 | * give up the lock and then examine it below. We don't want to |
1437 | * keep holding onto the tasklist_lock while we call getrusage and | 1440 | * keep holding onto the tasklist_lock while we call getrusage and |
1438 | * possibly take page faults for user memory. | 1441 | * possibly take page faults for user memory. |
1439 | */ | 1442 | */ |
1440 | get_task_struct(p); | 1443 | get_task_struct(p); |
1441 | pid = task_pid_vnr(p); | 1444 | pid = task_pid_vnr(p); |
1442 | why = ptrace ? CLD_TRAPPED : CLD_STOPPED; | 1445 | why = ptrace ? CLD_TRAPPED : CLD_STOPPED; |
1443 | read_unlock(&tasklist_lock); | 1446 | read_unlock(&tasklist_lock); |
1444 | 1447 | ||
1445 | if (unlikely(wo->wo_flags & WNOWAIT)) | 1448 | if (unlikely(wo->wo_flags & WNOWAIT)) |
1446 | return wait_noreap_copyout(wo, p, pid, uid, why, exit_code); | 1449 | return wait_noreap_copyout(wo, p, pid, uid, why, exit_code); |
1447 | 1450 | ||
1448 | retval = wo->wo_rusage | 1451 | retval = wo->wo_rusage |
1449 | ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; | 1452 | ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; |
1450 | if (!retval && wo->wo_stat) | 1453 | if (!retval && wo->wo_stat) |
1451 | retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat); | 1454 | retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat); |
1452 | 1455 | ||
1453 | infop = wo->wo_info; | 1456 | infop = wo->wo_info; |
1454 | if (!retval && infop) | 1457 | if (!retval && infop) |
1455 | retval = put_user(SIGCHLD, &infop->si_signo); | 1458 | retval = put_user(SIGCHLD, &infop->si_signo); |
1456 | if (!retval && infop) | 1459 | if (!retval && infop) |
1457 | retval = put_user(0, &infop->si_errno); | 1460 | retval = put_user(0, &infop->si_errno); |
1458 | if (!retval && infop) | 1461 | if (!retval && infop) |
1459 | retval = put_user((short)why, &infop->si_code); | 1462 | retval = put_user((short)why, &infop->si_code); |
1460 | if (!retval && infop) | 1463 | if (!retval && infop) |
1461 | retval = put_user(exit_code, &infop->si_status); | 1464 | retval = put_user(exit_code, &infop->si_status); |
1462 | if (!retval && infop) | 1465 | if (!retval && infop) |
1463 | retval = put_user(pid, &infop->si_pid); | 1466 | retval = put_user(pid, &infop->si_pid); |
1464 | if (!retval && infop) | 1467 | if (!retval && infop) |
1465 | retval = put_user(uid, &infop->si_uid); | 1468 | retval = put_user(uid, &infop->si_uid); |
1466 | if (!retval) | 1469 | if (!retval) |
1467 | retval = pid; | 1470 | retval = pid; |
1468 | put_task_struct(p); | 1471 | put_task_struct(p); |
1469 | 1472 | ||
1470 | BUG_ON(!retval); | 1473 | BUG_ON(!retval); |
1471 | return retval; | 1474 | return retval; |
1472 | } | 1475 | } |
1473 | 1476 | ||
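wait_task_stopped() encodes a stop as (exit_code << 8) | 0x7f before copying it to userspace, which is exactly what WIFSTOPPED()/WSTOPSIG() test for. An illustrative sketch, assuming the usual libc macros:

    #include <stdio.h>
    #include <signal.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();
        if (pid == 0) {
            pause();                        /* sit until stopped/killed */
            _exit(0);
        }

        kill(pid, SIGSTOP);

        int status;
        waitpid(pid, &status, WUNTRACED);   /* report stopped children */
        if (WIFSTOPPED(status))             /* (status & 0xff) == 0x7f */
            printf("stopped by signal %d\n", WSTOPSIG(status));

        kill(pid, SIGKILL);                 /* clean up and reap */
        waitpid(pid, &status, 0);
        return 0;
    }
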
1474 | /* | 1477 | /* |
1475 | * Handle do_wait work for one task in a live, non-stopped state. | 1478 | * Handle do_wait work for one task in a live, non-stopped state. |
1476 | * read_lock(&tasklist_lock) on entry. If we return zero, we still hold | 1479 | * read_lock(&tasklist_lock) on entry. If we return zero, we still hold |
1477 | * the lock and this task is uninteresting. If we return nonzero, we have | 1480 | * the lock and this task is uninteresting. If we return nonzero, we have |
1478 | * released the lock and the system call should return. | 1481 | * released the lock and the system call should return. |
1479 | */ | 1482 | */ |
1480 | static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) | 1483 | static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) |
1481 | { | 1484 | { |
1482 | int retval; | 1485 | int retval; |
1483 | pid_t pid; | 1486 | pid_t pid; |
1484 | uid_t uid; | 1487 | uid_t uid; |
1485 | 1488 | ||
1486 | if (!unlikely(wo->wo_flags & WCONTINUED)) | 1489 | if (!unlikely(wo->wo_flags & WCONTINUED)) |
1487 | return 0; | 1490 | return 0; |
1488 | 1491 | ||
1489 | if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) | 1492 | if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) |
1490 | return 0; | 1493 | return 0; |
1491 | 1494 | ||
1492 | spin_lock_irq(&p->sighand->siglock); | 1495 | spin_lock_irq(&p->sighand->siglock); |
1493 | /* Re-check with the lock held. */ | 1496 | /* Re-check with the lock held. */ |
1494 | if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) { | 1497 | if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) { |
1495 | spin_unlock_irq(&p->sighand->siglock); | 1498 | spin_unlock_irq(&p->sighand->siglock); |
1496 | return 0; | 1499 | return 0; |
1497 | } | 1500 | } |
1498 | if (!unlikely(wo->wo_flags & WNOWAIT)) | 1501 | if (!unlikely(wo->wo_flags & WNOWAIT)) |
1499 | p->signal->flags &= ~SIGNAL_STOP_CONTINUED; | 1502 | p->signal->flags &= ~SIGNAL_STOP_CONTINUED; |
1500 | uid = task_uid(p); | 1503 | uid = task_uid(p); |
1501 | spin_unlock_irq(&p->sighand->siglock); | 1504 | spin_unlock_irq(&p->sighand->siglock); |
1502 | 1505 | ||
1503 | pid = task_pid_vnr(p); | 1506 | pid = task_pid_vnr(p); |
1504 | get_task_struct(p); | 1507 | get_task_struct(p); |
1505 | read_unlock(&tasklist_lock); | 1508 | read_unlock(&tasklist_lock); |
1506 | 1509 | ||
1507 | if (!wo->wo_info) { | 1510 | if (!wo->wo_info) { |
1508 | retval = wo->wo_rusage | 1511 | retval = wo->wo_rusage |
1509 | ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; | 1512 | ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; |
1510 | put_task_struct(p); | 1513 | put_task_struct(p); |
1511 | if (!retval && wo->wo_stat) | 1514 | if (!retval && wo->wo_stat) |
1512 | retval = put_user(0xffff, wo->wo_stat); | 1515 | retval = put_user(0xffff, wo->wo_stat); |
1513 | if (!retval) | 1516 | if (!retval) |
1514 | retval = pid; | 1517 | retval = pid; |
1515 | } else { | 1518 | } else { |
1516 | retval = wait_noreap_copyout(wo, p, pid, uid, | 1519 | retval = wait_noreap_copyout(wo, p, pid, uid, |
1517 | CLD_CONTINUED, SIGCONT); | 1520 | CLD_CONTINUED, SIGCONT); |
1518 | BUG_ON(retval == 0); | 1521 | BUG_ON(retval == 0); |
1519 | } | 1522 | } |
1520 | 1523 | ||
1521 | return retval; | 1524 | return retval; |
1522 | } | 1525 | } |
1523 | 1526 | ||
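For the !wo_info case, wait_task_continued() reports the fixed status 0xffff, which libc exposes as WIFCONTINUED(). Sketch of the round trip, assuming WCONTINUED support:

    #include <stdio.h>
    #include <signal.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();
        if (pid == 0) {
            pause();
            _exit(0);
        }

        int status;
        kill(pid, SIGSTOP);
        waitpid(pid, &status, WUNTRACED);   /* consume the stop event */
        kill(pid, SIGCONT);
        waitpid(pid, &status, WCONTINUED);  /* SIGNAL_STOP_CONTINUED case */
        if (WIFCONTINUED(status))           /* status == 0xffff */
            printf("child continued\n");

        kill(pid, SIGKILL);
        waitpid(pid, &status, 0);
        return 0;
    }
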
1524 | /* | 1527 | /* |
1525 | * Consider @p for a wait by @parent. | 1528 | * Consider @p for a wait by @parent. |
1526 | * | 1529 | * |
1527 | * -ECHILD should be in ->notask_error before the first call. | 1530 | * -ECHILD should be in ->notask_error before the first call. |
1528 | * Returns nonzero for a final return, when we have unlocked tasklist_lock. | 1531 | * Returns nonzero for a final return, when we have unlocked tasklist_lock. |
1529 | * Returns zero if the search for a child should continue; | 1532 | * Returns zero if the search for a child should continue; |
1530 | * then ->notask_error is 0 if @p is an eligible child, | 1533 | * then ->notask_error is 0 if @p is an eligible child, |
1531 | * or another error from security_task_wait(), or still -ECHILD. | 1534 | * or another error from security_task_wait(), or still -ECHILD. |
1532 | */ | 1535 | */ |
1533 | static int wait_consider_task(struct wait_opts *wo, int ptrace, | 1536 | static int wait_consider_task(struct wait_opts *wo, int ptrace, |
1534 | struct task_struct *p) | 1537 | struct task_struct *p) |
1535 | { | 1538 | { |
1536 | int ret = eligible_child(wo, p); | 1539 | int ret = eligible_child(wo, p); |
1537 | if (!ret) | 1540 | if (!ret) |
1538 | return ret; | 1541 | return ret; |
1539 | 1542 | ||
1540 | ret = security_task_wait(p); | 1543 | ret = security_task_wait(p); |
1541 | if (unlikely(ret < 0)) { | 1544 | if (unlikely(ret < 0)) { |
1542 | /* | 1545 | /* |
1543 | * If we have not yet seen any eligible child, | 1546 | * If we have not yet seen any eligible child, |
1544 | * then let this error code replace -ECHILD. | 1547 | * then let this error code replace -ECHILD. |
1545 | * A permission error will give the user a clue | 1548 | * A permission error will give the user a clue |
1546 | * to look for security policy problems, rather | 1549 | * to look for security policy problems, rather |
1547 | * than for mysterious wait bugs. | 1550 | * than for mysterious wait bugs. |
1548 | */ | 1551 | */ |
1549 | if (wo->notask_error) | 1552 | if (wo->notask_error) |
1550 | wo->notask_error = ret; | 1553 | wo->notask_error = ret; |
1551 | return 0; | 1554 | return 0; |
1552 | } | 1555 | } |
1553 | 1556 | ||
1554 | /* dead body doesn't have much to contribute */ | 1557 | /* dead body doesn't have much to contribute */ |
1555 | if (p->exit_state == EXIT_DEAD) | 1558 | if (p->exit_state == EXIT_DEAD) |
1556 | return 0; | 1559 | return 0; |
1557 | 1560 | ||
1558 | /* slay zombie? */ | 1561 | /* slay zombie? */ |
1559 | if (p->exit_state == EXIT_ZOMBIE) { | 1562 | if (p->exit_state == EXIT_ZOMBIE) { |
1560 | /* | 1563 | /* |
1561 | * A zombie ptracee is only visible to its ptracer. | 1564 | * A zombie ptracee is only visible to its ptracer. |
1562 | * Notification and reaping will be cascaded to the real | 1565 | * Notification and reaping will be cascaded to the real |
1563 | * parent when the ptracer detaches. | 1566 | * parent when the ptracer detaches. |
1564 | */ | 1567 | */ |
1565 | if (likely(!ptrace) && unlikely(p->ptrace)) { | 1568 | if (likely(!ptrace) && unlikely(p->ptrace)) { |
1566 | /* it will become visible, clear notask_error */ | 1569 | /* it will become visible, clear notask_error */ |
1567 | wo->notask_error = 0; | 1570 | wo->notask_error = 0; |
1568 | return 0; | 1571 | return 0; |
1569 | } | 1572 | } |
1570 | 1573 | ||
1571 | /* we don't reap group leaders with subthreads */ | 1574 | /* we don't reap group leaders with subthreads */ |
1572 | if (!delay_group_leader(p)) | 1575 | if (!delay_group_leader(p)) |
1573 | return wait_task_zombie(wo, p); | 1576 | return wait_task_zombie(wo, p); |
1574 | 1577 | ||
1575 | /* | 1578 | /* |
1576 | * Allow access to stopped/continued state via zombie by | 1579 | * Allow access to stopped/continued state via zombie by |
1577 | * falling through. Clearing of notask_error is complex. | 1580 | * falling through. Clearing of notask_error is complex. |
1578 | * | 1581 | * |
1579 | * When !@ptrace: | 1582 | * When !@ptrace: |
1580 | * | 1583 | * |
1581 | * If WEXITED is set, notask_error should naturally be | 1584 | * If WEXITED is set, notask_error should naturally be |
1582 | 	 * cleared.  If not, a subset of WSTOPPED|WCONTINUED is set, | 1585 | 	 * cleared.  If not, a subset of WSTOPPED|WCONTINUED is set, |
1583 | * so, if there are live subthreads, there are events to | 1586 | * so, if there are live subthreads, there are events to |
1584 | * wait for. If all subthreads are dead, it's still safe | 1587 | * wait for. If all subthreads are dead, it's still safe |
1585 | 	 * to clear - this function will be called again in a finite | 1588 | 	 * to clear - this function will be called again in a finite |
1586 | 	 * amount of time once all the subthreads are released and | 1589 | 	 * amount of time once all the subthreads are released and |
1587 | * will then return without clearing. | 1590 | * will then return without clearing. |
1588 | * | 1591 | * |
1589 | * When @ptrace: | 1592 | * When @ptrace: |
1590 | * | 1593 | * |
1591 | * Stopped state is per-task and thus can't change once the | 1594 | * Stopped state is per-task and thus can't change once the |
1592 | * target task dies. Only continued and exited can happen. | 1595 | * target task dies. Only continued and exited can happen. |
1593 | * Clear notask_error if WCONTINUED | WEXITED. | 1596 | * Clear notask_error if WCONTINUED | WEXITED. |
1594 | */ | 1597 | */ |
1595 | if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED))) | 1598 | if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED))) |
1596 | wo->notask_error = 0; | 1599 | wo->notask_error = 0; |
1597 | } else { | 1600 | } else { |
1598 | /* | 1601 | /* |
1599 | * If @p is ptraced by a task in its real parent's group, | 1602 | * If @p is ptraced by a task in its real parent's group, |
1600 | * hide group stop/continued state when looking at @p as | 1603 | * hide group stop/continued state when looking at @p as |
1601 | * the real parent; otherwise, a single stop can be | 1604 | * the real parent; otherwise, a single stop can be |
1602 | * reported twice as group and ptrace stops. | 1605 | * reported twice as group and ptrace stops. |
1603 | * | 1606 | * |
1604 | * If a ptracer wants to distinguish the two events for its | 1607 | * If a ptracer wants to distinguish the two events for its |
1605 | * own children, it should create a separate process which | 1608 | * own children, it should create a separate process which |
1606 | * takes the role of real parent. | 1609 | * takes the role of real parent. |
1607 | */ | 1610 | */ |
1608 | if (likely(!ptrace) && p->ptrace && | 1611 | if (likely(!ptrace) && p->ptrace && |
1609 | same_thread_group(p->parent, p->real_parent)) | 1612 | same_thread_group(p->parent, p->real_parent)) |
1610 | return 0; | 1613 | return 0; |
1611 | 1614 | ||
1612 | /* | 1615 | /* |
1613 | * @p is alive and it's gonna stop, continue or exit, so | 1616 | * @p is alive and it's gonna stop, continue or exit, so |
1614 | * there always is something to wait for. | 1617 | * there always is something to wait for. |
1615 | */ | 1618 | */ |
1616 | wo->notask_error = 0; | 1619 | wo->notask_error = 0; |
1617 | } | 1620 | } |
1618 | 1621 | ||
1619 | /* | 1622 | /* |
1620 | * Wait for stopped. Depending on @ptrace, different stopped state | 1623 | * Wait for stopped. Depending on @ptrace, different stopped state |
1621 | * is used and the two don't interact with each other. | 1624 | * is used and the two don't interact with each other. |
1622 | */ | 1625 | */ |
1623 | ret = wait_task_stopped(wo, ptrace, p); | 1626 | ret = wait_task_stopped(wo, ptrace, p); |
1624 | if (ret) | 1627 | if (ret) |
1625 | return ret; | 1628 | return ret; |
1626 | 1629 | ||
1627 | /* | 1630 | /* |
1628 | * Wait for continued. There's only one continued state and the | 1631 | * Wait for continued. There's only one continued state and the |
1629 | * ptracer can consume it which can confuse the real parent. Don't | 1632 | * ptracer can consume it which can confuse the real parent. Don't |
1630 | * use WCONTINUED from ptracer. You don't need or want it. | 1633 | * use WCONTINUED from ptracer. You don't need or want it. |
1631 | */ | 1634 | */ |
1632 | return wait_task_continued(wo, p); | 1635 | return wait_task_continued(wo, p); |
1633 | } | 1636 | } |
1634 | 1637 | ||
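wait_consider_task() can skip EXIT_DEAD bodies outright because wait_task_zombie() claims a zombie with xchg(&p->exit_state, EXIT_DEAD): exactly one waiter sees EXIT_ZOMBIE come back, everyone else backs off. A userspace model of that hand-off using C11 atomics; the names and values are illustrative, not the kernel's:

    #include <stdatomic.h>
    #include <stdio.h>

    enum { EXIT_ZOMBIE = 1, EXIT_DEAD = 2 };

    static _Atomic int exit_state = EXIT_ZOMBIE;

    static int try_reap(const char *who)
    {
        int prev = atomic_exchange(&exit_state, EXIT_DEAD);
        if (prev != EXIT_ZOMBIE)
            return 0;               /* someone else already claimed it */
        printf("%s reaps the task\n", who);
        return 1;
    }

    int main(void)
    {
        try_reap("waiter A");       /* wins the race */
        try_reap("waiter B");       /* sees EXIT_DEAD, backs off */
        return 0;
    }
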
1635 | /* | 1638 | /* |
1636 | * Do the work of do_wait() for one thread in the group, @tsk. | 1639 | * Do the work of do_wait() for one thread in the group, @tsk. |
1637 | * | 1640 | * |
1638 | * -ECHILD should be in ->notask_error before the first call. | 1641 | * -ECHILD should be in ->notask_error before the first call. |
1639 | * Returns nonzero for a final return, when we have unlocked tasklist_lock. | 1642 | * Returns nonzero for a final return, when we have unlocked tasklist_lock. |
1640 | * Returns zero if the search for a child should continue; then | 1643 | * Returns zero if the search for a child should continue; then |
1641 | * ->notask_error is 0 if there were any eligible children, | 1644 | * ->notask_error is 0 if there were any eligible children, |
1642 | * or another error from security_task_wait(), or still -ECHILD. | 1645 | * or another error from security_task_wait(), or still -ECHILD. |
1643 | */ | 1646 | */ |
1644 | static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk) | 1647 | static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk) |
1645 | { | 1648 | { |
1646 | struct task_struct *p; | 1649 | struct task_struct *p; |
1647 | 1650 | ||
1648 | list_for_each_entry(p, &tsk->children, sibling) { | 1651 | list_for_each_entry(p, &tsk->children, sibling) { |
1649 | int ret = wait_consider_task(wo, 0, p); | 1652 | int ret = wait_consider_task(wo, 0, p); |
1650 | if (ret) | 1653 | if (ret) |
1651 | return ret; | 1654 | return ret; |
1652 | } | 1655 | } |
1653 | 1656 | ||
1654 | return 0; | 1657 | return 0; |
1655 | } | 1658 | } |
1656 | 1659 | ||
1657 | static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk) | 1660 | static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk) |
1658 | { | 1661 | { |
1659 | struct task_struct *p; | 1662 | struct task_struct *p; |
1660 | 1663 | ||
1661 | list_for_each_entry(p, &tsk->ptraced, ptrace_entry) { | 1664 | list_for_each_entry(p, &tsk->ptraced, ptrace_entry) { |
1662 | int ret = wait_consider_task(wo, 1, p); | 1665 | int ret = wait_consider_task(wo, 1, p); |
1663 | if (ret) | 1666 | if (ret) |
1664 | return ret; | 1667 | return ret; |
1665 | } | 1668 | } |
1666 | 1669 | ||
1667 | return 0; | 1670 | return 0; |
1668 | } | 1671 | } |
1669 | 1672 | ||
1670 | static int child_wait_callback(wait_queue_t *wait, unsigned mode, | 1673 | static int child_wait_callback(wait_queue_t *wait, unsigned mode, |
1671 | int sync, void *key) | 1674 | int sync, void *key) |
1672 | { | 1675 | { |
1673 | struct wait_opts *wo = container_of(wait, struct wait_opts, | 1676 | struct wait_opts *wo = container_of(wait, struct wait_opts, |
1674 | child_wait); | 1677 | child_wait); |
1675 | struct task_struct *p = key; | 1678 | struct task_struct *p = key; |
1676 | 1679 | ||
1677 | if (!eligible_pid(wo, p)) | 1680 | if (!eligible_pid(wo, p)) |
1678 | return 0; | 1681 | return 0; |
1679 | 1682 | ||
1680 | if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent) | 1683 | if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent) |
1681 | return 0; | 1684 | return 0; |
1682 | 1685 | ||
1683 | return default_wake_function(wait, mode, sync, key); | 1686 | return default_wake_function(wait, mode, sync, key); |
1684 | } | 1687 | } |
1685 | 1688 | ||
1686 | void __wake_up_parent(struct task_struct *p, struct task_struct *parent) | 1689 | void __wake_up_parent(struct task_struct *p, struct task_struct *parent) |
1687 | { | 1690 | { |
1688 | __wake_up_sync_key(&parent->signal->wait_chldexit, | 1691 | __wake_up_sync_key(&parent->signal->wait_chldexit, |
1689 | TASK_INTERRUPTIBLE, 1, p); | 1692 | TASK_INTERRUPTIBLE, 1, p); |
1690 | } | 1693 | } |
1691 | 1694 | ||
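child_wait_callback() makes the wakeup selective: __wake_up_parent() passes the exiting child as the wake key, and each waiter's callback decides from its own wait options whether the event is for it. The same shape in userspace is a broadcast condvar plus a per-waiter predicate (sketch only; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int exited_child;            /* 0 = no event yet */

    static void *child_exits(void *arg)
    {
        pthread_mutex_lock(&lock);
        exited_child = (int)(long)arg;  /* the "key" naming the child */
        pthread_cond_broadcast(&cond);  /* wake every waiter... */
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, child_exits, (void *)42L);

        pthread_mutex_lock(&lock);
        while (exited_child != 42)      /* ...only the matching one proceeds */
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);

        printf("woken for child 42\n");
        pthread_join(t, NULL);
        return 0;
    }
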
1692 | static long do_wait(struct wait_opts *wo) | 1695 | static long do_wait(struct wait_opts *wo) |
1693 | { | 1696 | { |
1694 | struct task_struct *tsk; | 1697 | struct task_struct *tsk; |
1695 | int retval; | 1698 | int retval; |
1696 | 1699 | ||
1697 | trace_sched_process_wait(wo->wo_pid); | 1700 | trace_sched_process_wait(wo->wo_pid); |
1698 | 1701 | ||
1699 | init_waitqueue_func_entry(&wo->child_wait, child_wait_callback); | 1702 | init_waitqueue_func_entry(&wo->child_wait, child_wait_callback); |
1700 | wo->child_wait.private = current; | 1703 | wo->child_wait.private = current; |
1701 | add_wait_queue(¤t->signal->wait_chldexit, &wo->child_wait); | 1704 | add_wait_queue(¤t->signal->wait_chldexit, &wo->child_wait); |
1702 | repeat: | 1705 | repeat: |
1703 | /* | 1706 | /* |
1704 | 	 * If there is nothing that can match our criteria, just get out. | 1707 | 	 * If there is nothing that can match our criteria, just get out. |
1705 | * We will clear ->notask_error to zero if we see any child that | 1708 | * We will clear ->notask_error to zero if we see any child that |
1706 | * might later match our criteria, even if we are not able to reap | 1709 | * might later match our criteria, even if we are not able to reap |
1707 | * it yet. | 1710 | * it yet. |
1708 | */ | 1711 | */ |
1709 | wo->notask_error = -ECHILD; | 1712 | wo->notask_error = -ECHILD; |
1710 | if ((wo->wo_type < PIDTYPE_MAX) && | 1713 | if ((wo->wo_type < PIDTYPE_MAX) && |
1711 | (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type]))) | 1714 | (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type]))) |
1712 | goto notask; | 1715 | goto notask; |
1713 | 1716 | ||
1714 | set_current_state(TASK_INTERRUPTIBLE); | 1717 | set_current_state(TASK_INTERRUPTIBLE); |
1715 | read_lock(&tasklist_lock); | 1718 | read_lock(&tasklist_lock); |
1716 | tsk = current; | 1719 | tsk = current; |
1717 | do { | 1720 | do { |
1718 | retval = do_wait_thread(wo, tsk); | 1721 | retval = do_wait_thread(wo, tsk); |
1719 | if (retval) | 1722 | if (retval) |
1720 | goto end; | 1723 | goto end; |
1721 | 1724 | ||
1722 | retval = ptrace_do_wait(wo, tsk); | 1725 | retval = ptrace_do_wait(wo, tsk); |
1723 | if (retval) | 1726 | if (retval) |
1724 | goto end; | 1727 | goto end; |
1725 | 1728 | ||
1726 | if (wo->wo_flags & __WNOTHREAD) | 1729 | if (wo->wo_flags & __WNOTHREAD) |
1727 | break; | 1730 | break; |
1728 | } while_each_thread(current, tsk); | 1731 | } while_each_thread(current, tsk); |
1729 | read_unlock(&tasklist_lock); | 1732 | read_unlock(&tasklist_lock); |
1730 | 1733 | ||
1731 | notask: | 1734 | notask: |
1732 | retval = wo->notask_error; | 1735 | retval = wo->notask_error; |
1733 | if (!retval && !(wo->wo_flags & WNOHANG)) { | 1736 | if (!retval && !(wo->wo_flags & WNOHANG)) { |
1734 | retval = -ERESTARTSYS; | 1737 | retval = -ERESTARTSYS; |
1735 | if (!signal_pending(current)) { | 1738 | if (!signal_pending(current)) { |
1736 | schedule(); | 1739 | schedule(); |
1737 | goto repeat; | 1740 | goto repeat; |
1738 | } | 1741 | } |
1739 | } | 1742 | } |
1740 | end: | 1743 | end: |
1741 | __set_current_state(TASK_RUNNING); | 1744 | __set_current_state(TASK_RUNNING); |
1742 | remove_wait_queue(¤t->signal->wait_chldexit, &wo->child_wait); | 1745 | remove_wait_queue(¤t->signal->wait_chldexit, &wo->child_wait); |
1743 | return retval; | 1746 | return retval; |
1744 | } | 1747 | } |
1745 | 1748 | ||
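The repeat: loop in do_wait() is the classic prepare-to-wait pattern: mark the task TASK_INTERRUPTIBLE, scan for events, and schedule() only if nothing was found and WNOHANG was not passed. From userspace the WNOHANG short-circuit looks like this (timing-dependent sketch; the first call normally runs well before the child's exit):

    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();
        if (pid == 0) {
            sleep(1);
            _exit(7);
        }

        int status;
        pid_t r = waitpid(pid, &status, WNOHANG);
        printf("WNOHANG while child is alive: %d\n", (int)r);  /* 0 */

        r = waitpid(pid, &status, 0);       /* now block until it exits */
        printf("blocking wait reaped %d, code %d\n",
               (int)r, WEXITSTATUS(status));
        return 0;
    }
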
1746 | SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, | 1749 | SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, |
1747 | infop, int, options, struct rusage __user *, ru) | 1750 | infop, int, options, struct rusage __user *, ru) |
1748 | { | 1751 | { |
1749 | struct wait_opts wo; | 1752 | struct wait_opts wo; |
1750 | struct pid *pid = NULL; | 1753 | struct pid *pid = NULL; |
1751 | enum pid_type type; | 1754 | enum pid_type type; |
1752 | long ret; | 1755 | long ret; |
1753 | 1756 | ||
1754 | if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED)) | 1757 | if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED)) |
1755 | return -EINVAL; | 1758 | return -EINVAL; |
1756 | if (!(options & (WEXITED|WSTOPPED|WCONTINUED))) | 1759 | if (!(options & (WEXITED|WSTOPPED|WCONTINUED))) |
1757 | return -EINVAL; | 1760 | return -EINVAL; |
1758 | 1761 | ||
1759 | switch (which) { | 1762 | switch (which) { |
1760 | case P_ALL: | 1763 | case P_ALL: |
1761 | type = PIDTYPE_MAX; | 1764 | type = PIDTYPE_MAX; |
1762 | break; | 1765 | break; |
1763 | case P_PID: | 1766 | case P_PID: |
1764 | type = PIDTYPE_PID; | 1767 | type = PIDTYPE_PID; |
1765 | if (upid <= 0) | 1768 | if (upid <= 0) |
1766 | return -EINVAL; | 1769 | return -EINVAL; |
1767 | break; | 1770 | break; |
1768 | case P_PGID: | 1771 | case P_PGID: |
1769 | type = PIDTYPE_PGID; | 1772 | type = PIDTYPE_PGID; |
1770 | if (upid <= 0) | 1773 | if (upid <= 0) |
1771 | return -EINVAL; | 1774 | return -EINVAL; |
1772 | break; | 1775 | break; |
1773 | default: | 1776 | default: |
1774 | return -EINVAL; | 1777 | return -EINVAL; |
1775 | } | 1778 | } |
1776 | 1779 | ||
1777 | if (type < PIDTYPE_MAX) | 1780 | if (type < PIDTYPE_MAX) |
1778 | pid = find_get_pid(upid); | 1781 | pid = find_get_pid(upid); |
1779 | 1782 | ||
1780 | wo.wo_type = type; | 1783 | wo.wo_type = type; |
1781 | wo.wo_pid = pid; | 1784 | wo.wo_pid = pid; |
1782 | wo.wo_flags = options; | 1785 | wo.wo_flags = options; |
1783 | wo.wo_info = infop; | 1786 | wo.wo_info = infop; |
1784 | wo.wo_stat = NULL; | 1787 | wo.wo_stat = NULL; |
1785 | wo.wo_rusage = ru; | 1788 | wo.wo_rusage = ru; |
1786 | ret = do_wait(&wo); | 1789 | ret = do_wait(&wo); |
1787 | 1790 | ||
1788 | if (ret > 0) { | 1791 | if (ret > 0) { |
1789 | ret = 0; | 1792 | ret = 0; |
1790 | } else if (infop) { | 1793 | } else if (infop) { |
1791 | /* | 1794 | /* |
1792 | * For a WNOHANG return, clear out all the fields | 1795 | * For a WNOHANG return, clear out all the fields |
1793 | * we would set so the user can easily tell the | 1796 | * we would set so the user can easily tell the |
1794 | * difference. | 1797 | * difference. |
1795 | */ | 1798 | */ |
1796 | if (!ret) | 1799 | if (!ret) |
1797 | ret = put_user(0, &infop->si_signo); | 1800 | ret = put_user(0, &infop->si_signo); |
1798 | if (!ret) | 1801 | if (!ret) |
1799 | ret = put_user(0, &infop->si_errno); | 1802 | ret = put_user(0, &infop->si_errno); |
1800 | if (!ret) | 1803 | if (!ret) |
1801 | ret = put_user(0, &infop->si_code); | 1804 | ret = put_user(0, &infop->si_code); |
1802 | if (!ret) | 1805 | if (!ret) |
1803 | ret = put_user(0, &infop->si_pid); | 1806 | ret = put_user(0, &infop->si_pid); |
1804 | if (!ret) | 1807 | if (!ret) |
1805 | ret = put_user(0, &infop->si_uid); | 1808 | ret = put_user(0, &infop->si_uid); |
1806 | if (!ret) | 1809 | if (!ret) |
1807 | ret = put_user(0, &infop->si_status); | 1810 | ret = put_user(0, &infop->si_status); |
1808 | } | 1811 | } |
1809 | 1812 | ||
1810 | put_pid(pid); | 1813 | put_pid(pid); |
1811 | 1814 | ||
1812 | /* avoid REGPARM breakage on x86: */ | 1815 | /* avoid REGPARM breakage on x86: */ |
1813 | asmlinkage_protect(5, ret, which, upid, infop, options, ru); | 1816 | asmlinkage_protect(5, ret, which, upid, infop, options, ru); |
1814 | return ret; | 1817 | return ret; |
1815 | } | 1818 | } |
1816 | 1819 | ||
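The WNOHANG branch at the end of sys_waitid() zeroes the siginfo fields precisely so callers can tell "no event yet" from a reaped child. The userspace counterpart, assuming the POSIX waitid() wrapper:

    #include <stdio.h>
    #include <string.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();
        if (pid == 0) {
            sleep(1);
            _exit(3);
        }

        siginfo_t info;
        memset(&info, 0xff, sizeof(info));  /* poison to watch the clearing */
        if (waitid(P_PID, pid, &info, WEXITED | WNOHANG) == 0 &&
            info.si_pid == 0)
            printf("no event yet: si_pid cleared to 0\n");

        waitid(P_PID, pid, &info, WEXITED); /* block for the real exit */
        printf("pid %d, %s, status %d\n", (int)info.si_pid,
               info.si_code == CLD_EXITED ? "CLD_EXITED" : "other",
               info.si_status);
        return 0;
    }
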
1817 | SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr, | 1820 | SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr, |
1818 | int, options, struct rusage __user *, ru) | 1821 | int, options, struct rusage __user *, ru) |
1819 | { | 1822 | { |
1820 | struct wait_opts wo; | 1823 | struct wait_opts wo; |
1821 | struct pid *pid = NULL; | 1824 | struct pid *pid = NULL; |
1822 | enum pid_type type; | 1825 | enum pid_type type; |
1823 | long ret; | 1826 | long ret; |
1824 | 1827 | ||
1825 | if (options & ~(WNOHANG|WUNTRACED|WCONTINUED| | 1828 | if (options & ~(WNOHANG|WUNTRACED|WCONTINUED| |
1826 | __WNOTHREAD|__WCLONE|__WALL)) | 1829 | __WNOTHREAD|__WCLONE|__WALL)) |
1827 | return -EINVAL; | 1830 | return -EINVAL; |
1828 | 1831 | ||
1829 | if (upid == -1) | 1832 | if (upid == -1) |
1830 | type = PIDTYPE_MAX; | 1833 | type = PIDTYPE_MAX; |
1831 | else if (upid < 0) { | 1834 | else if (upid < 0) { |
1832 | type = PIDTYPE_PGID; | 1835 | type = PIDTYPE_PGID; |
1833 | pid = find_get_pid(-upid); | 1836 | pid = find_get_pid(-upid); |
1834 | } else if (upid == 0) { | 1837 | } else if (upid == 0) { |
1835 | type = PIDTYPE_PGID; | 1838 | type = PIDTYPE_PGID; |
1836 | pid = get_task_pid(current, PIDTYPE_PGID); | 1839 | pid = get_task_pid(current, PIDTYPE_PGID); |
1837 | } else /* upid > 0 */ { | 1840 | } else /* upid > 0 */ { |
1838 | type = PIDTYPE_PID; | 1841 | type = PIDTYPE_PID; |
1839 | pid = find_get_pid(upid); | 1842 | pid = find_get_pid(upid); |
1840 | } | 1843 | } |
1841 | 1844 | ||
1842 | wo.wo_type = type; | 1845 | wo.wo_type = type; |
1843 | wo.wo_pid = pid; | 1846 | wo.wo_pid = pid; |
1844 | wo.wo_flags = options | WEXITED; | 1847 | wo.wo_flags = options | WEXITED; |
1845 | wo.wo_info = NULL; | 1848 | wo.wo_info = NULL; |
1846 | wo.wo_stat = stat_addr; | 1849 | wo.wo_stat = stat_addr; |
1847 | wo.wo_rusage = ru; | 1850 | wo.wo_rusage = ru; |
1848 | ret = do_wait(&wo); | 1851 | ret = do_wait(&wo); |
1849 | put_pid(pid); | 1852 | put_pid(pid); |
1850 | 1853 | ||
1851 | /* avoid REGPARM breakage on x86: */ | 1854 | /* avoid REGPARM breakage on x86: */ |
1852 | asmlinkage_protect(4, ret, upid, stat_addr, options, ru); | 1855 | asmlinkage_protect(4, ret, upid, stat_addr, options, ru); |
1853 | return ret; | 1856 | return ret; |
1854 | } | 1857 | } |
1855 | 1858 | ||
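The upid decoding in sys_wait4() gives waitpid() its four addressing modes: -1 waits for any child, a negative value for a process group, 0 for the caller's own group, and a positive value for one exact pid. Sketch of the process-group case; the double setpgid() is the usual race-safe idiom:

    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();
        if (pid == 0) {
            setpgid(0, 0);          /* new process group, pgid == pid */
            _exit(0);
        }
        setpgid(pid, pid);          /* repeat in the parent, ignore races */

        int status;
        pid_t r = waitpid(-pid, &status, 0);    /* the PIDTYPE_PGID path */
        printf("reaped %d from group %d\n", (int)r, (int)pid);
        return 0;
    }
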
1856 | #ifdef __ARCH_WANT_SYS_WAITPID | 1859 | #ifdef __ARCH_WANT_SYS_WAITPID |
1857 | 1860 | ||
1858 | /* | 1861 | /* |
1859 | * sys_waitpid() remains for compatibility. waitpid() should be | 1862 | * sys_waitpid() remains for compatibility. waitpid() should be |
1860 | * implemented by calling sys_wait4() from libc.a. | 1863 | * implemented by calling sys_wait4() from libc.a. |
1861 | */ | 1864 | */ |
1862 | SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options) | 1865 | SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options) |
1863 | { | 1866 | { |
1864 | return sys_wait4(pid, stat_addr, options, NULL); | 1867 | return sys_wait4(pid, stat_addr, options, NULL); |
1865 | } | 1868 | } |
1866 | 1869 | ||
1867 | #endif | 1870 | #endif |
1868 | 1871 |
kernel/signal.c
1 | /* | 1 | /* |
2 | * linux/kernel/signal.c | 2 | * linux/kernel/signal.c |
3 | * | 3 | * |
4 | * Copyright (C) 1991, 1992 Linus Torvalds | 4 | * Copyright (C) 1991, 1992 Linus Torvalds |
5 | * | 5 | * |
6 | * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson | 6 | * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson |
7 | * | 7 | * |
8 | * 2003-06-02 Jim Houston - Concurrent Computer Corp. | 8 | * 2003-06-02 Jim Houston - Concurrent Computer Corp. |
9 | * Changes to use preallocated sigqueue structures | 9 | * Changes to use preallocated sigqueue structures |
10 | * to allow signals to be sent reliably. | 10 | * to allow signals to be sent reliably. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | #include <linux/tty.h> | 18 | #include <linux/tty.h> |
19 | #include <linux/binfmts.h> | 19 | #include <linux/binfmts.h> |
20 | #include <linux/security.h> | 20 | #include <linux/security.h> |
21 | #include <linux/syscalls.h> | 21 | #include <linux/syscalls.h> |
22 | #include <linux/ptrace.h> | 22 | #include <linux/ptrace.h> |
23 | #include <linux/signal.h> | 23 | #include <linux/signal.h> |
24 | #include <linux/signalfd.h> | 24 | #include <linux/signalfd.h> |
25 | #include <linux/ratelimit.h> | 25 | #include <linux/ratelimit.h> |
26 | #include <linux/tracehook.h> | 26 | #include <linux/tracehook.h> |
27 | #include <linux/capability.h> | 27 | #include <linux/capability.h> |
28 | #include <linux/freezer.h> | 28 | #include <linux/freezer.h> |
29 | #include <linux/pid_namespace.h> | 29 | #include <linux/pid_namespace.h> |
30 | #include <linux/nsproxy.h> | 30 | #include <linux/nsproxy.h> |
31 | #define CREATE_TRACE_POINTS | 31 | #define CREATE_TRACE_POINTS |
32 | #include <trace/events/signal.h> | 32 | #include <trace/events/signal.h> |
33 | 33 | ||
34 | #include <asm/param.h> | 34 | #include <asm/param.h> |
35 | #include <asm/uaccess.h> | 35 | #include <asm/uaccess.h> |
36 | #include <asm/unistd.h> | 36 | #include <asm/unistd.h> |
37 | #include <asm/siginfo.h> | 37 | #include <asm/siginfo.h> |
38 | #include "audit.h" /* audit_signal_info() */ | 38 | #include "audit.h" /* audit_signal_info() */ |
39 | 39 | ||
40 | /* | 40 | /* |
41 | * SLAB caches for signal bits. | 41 | * SLAB caches for signal bits. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | static struct kmem_cache *sigqueue_cachep; | 44 | static struct kmem_cache *sigqueue_cachep; |
45 | 45 | ||
46 | int print_fatal_signals __read_mostly; | 46 | int print_fatal_signals __read_mostly; |
47 | 47 | ||
48 | static void __user *sig_handler(struct task_struct *t, int sig) | 48 | static void __user *sig_handler(struct task_struct *t, int sig) |
49 | { | 49 | { |
50 | return t->sighand->action[sig - 1].sa.sa_handler; | 50 | return t->sighand->action[sig - 1].sa.sa_handler; |
51 | } | 51 | } |
52 | 52 | ||
53 | static int sig_handler_ignored(void __user *handler, int sig) | 53 | static int sig_handler_ignored(void __user *handler, int sig) |
54 | { | 54 | { |
55 | /* Is it explicitly or implicitly ignored? */ | 55 | /* Is it explicitly or implicitly ignored? */ |
56 | return handler == SIG_IGN || | 56 | return handler == SIG_IGN || |
57 | (handler == SIG_DFL && sig_kernel_ignore(sig)); | 57 | (handler == SIG_DFL && sig_kernel_ignore(sig)); |
58 | } | 58 | } |
59 | 59 | ||
60 | static int sig_task_ignored(struct task_struct *t, int sig, | 60 | static int sig_task_ignored(struct task_struct *t, int sig, |
61 | int from_ancestor_ns) | 61 | int from_ancestor_ns) |
62 | { | 62 | { |
63 | void __user *handler; | 63 | void __user *handler; |
64 | 64 | ||
65 | handler = sig_handler(t, sig); | 65 | handler = sig_handler(t, sig); |
66 | 66 | ||
67 | if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) && | 67 | if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) && |
68 | handler == SIG_DFL && !from_ancestor_ns) | 68 | handler == SIG_DFL && !from_ancestor_ns) |
69 | return 1; | 69 | return 1; |
70 | 70 | ||
71 | return sig_handler_ignored(handler, sig); | 71 | return sig_handler_ignored(handler, sig); |
72 | } | 72 | } |
73 | 73 | ||
74 | static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns) | 74 | static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns) |
75 | { | 75 | { |
76 | /* | 76 | /* |
77 | * Blocked signals are never ignored, since the | 77 | * Blocked signals are never ignored, since the |
78 | * signal handler may change by the time it is | 78 | * signal handler may change by the time it is |
79 | * unblocked. | 79 | * unblocked. |
80 | */ | 80 | */ |
81 | if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) | 81 | if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) |
82 | return 0; | 82 | return 0; |
83 | 83 | ||
84 | if (!sig_task_ignored(t, sig, from_ancestor_ns)) | 84 | if (!sig_task_ignored(t, sig, from_ancestor_ns)) |
85 | return 0; | 85 | return 0; |
86 | 86 | ||
87 | /* | 87 | /* |
88 | * Tracers may want to know about even ignored signals. | 88 | * Tracers may want to know about even ignored signals. |
89 | */ | 89 | */ |
90 | return !t->ptrace; | 90 | return !t->ptrace; |
91 | } | 91 | } |
92 | 92 | ||
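sig_handler_ignored() is what makes an explicitly ignored SIGCHLD special: a parent that sets SIG_IGN asks the kernel to auto-reap its exiting children, which is the condition the new boolean return of do_notify_parent() reports to its caller. The effect is visible from userspace:

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        signal(SIGCHLD, SIG_IGN);   /* parent ignores SIGCHLD */

        pid_t pid = fork();
        if (pid == 0)
            _exit(0);

        sleep(1);                   /* give the child time to exit */
        if (waitpid(pid, NULL, 0) < 0 && errno == ECHILD)
            printf("child was auto-reaped, nothing left to wait for\n");
        return 0;
    }
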
93 | /* | 93 | /* |
94 | * Re-calculate pending state from the set of locally pending | 94 | * Re-calculate pending state from the set of locally pending |
95 | * signals, globally pending signals, and blocked signals. | 95 | * signals, globally pending signals, and blocked signals. |
96 | */ | 96 | */ |
97 | static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) | 97 | static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) |
98 | { | 98 | { |
99 | unsigned long ready; | 99 | unsigned long ready; |
100 | long i; | 100 | long i; |
101 | 101 | ||
102 | switch (_NSIG_WORDS) { | 102 | switch (_NSIG_WORDS) { |
103 | default: | 103 | default: |
104 | for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;) | 104 | for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;) |
105 | ready |= signal->sig[i] &~ blocked->sig[i]; | 105 | ready |= signal->sig[i] &~ blocked->sig[i]; |
106 | break; | 106 | break; |
107 | 107 | ||
108 | case 4: ready = signal->sig[3] &~ blocked->sig[3]; | 108 | case 4: ready = signal->sig[3] &~ blocked->sig[3]; |
109 | ready |= signal->sig[2] &~ blocked->sig[2]; | 109 | ready |= signal->sig[2] &~ blocked->sig[2]; |
110 | ready |= signal->sig[1] &~ blocked->sig[1]; | 110 | ready |= signal->sig[1] &~ blocked->sig[1]; |
111 | ready |= signal->sig[0] &~ blocked->sig[0]; | 111 | ready |= signal->sig[0] &~ blocked->sig[0]; |
112 | break; | 112 | break; |
113 | 113 | ||
114 | case 2: ready = signal->sig[1] &~ blocked->sig[1]; | 114 | case 2: ready = signal->sig[1] &~ blocked->sig[1]; |
115 | ready |= signal->sig[0] &~ blocked->sig[0]; | 115 | ready |= signal->sig[0] &~ blocked->sig[0]; |
116 | break; | 116 | break; |
117 | 117 | ||
118 | case 1: ready = signal->sig[0] &~ blocked->sig[0]; | 118 | case 1: ready = signal->sig[0] &~ blocked->sig[0]; |
119 | } | 119 | } |
120 | return ready != 0; | 120 | return ready != 0; |
121 | } | 121 | } |
122 | 122 | ||
123 | #define PENDING(p,b) has_pending_signals(&(p)->signal, (b)) | 123 | #define PENDING(p,b) has_pending_signals(&(p)->signal, (b)) |
124 | 124 | ||
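has_pending_signals() is just "any bit set in pending that is not blocked", hand-unrolled per sigset word. A one-word model (signal numbers are 1-based, so signal n lives at bit n - 1; SIGUSR1 == 10 is the x86 Linux value and is only illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static bool has_pending(unsigned long pending, unsigned long blocked)
    {
        return (pending & ~blocked) != 0;
    }

    int main(void)
    {
        unsigned long pending = 1UL << (10 - 1);    /* SIGUSR1 pending */
        unsigned long blocked = 1UL << (10 - 1);    /* ...but blocked  */

        printf("%d\n", has_pending(pending, blocked));  /* 0 */
        printf("%d\n", has_pending(pending, 0));        /* 1 */
        return 0;
    }
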
125 | static int recalc_sigpending_tsk(struct task_struct *t) | 125 | static int recalc_sigpending_tsk(struct task_struct *t) |
126 | { | 126 | { |
127 | if ((t->jobctl & JOBCTL_PENDING_MASK) || | 127 | if ((t->jobctl & JOBCTL_PENDING_MASK) || |
128 | PENDING(&t->pending, &t->blocked) || | 128 | PENDING(&t->pending, &t->blocked) || |
129 | PENDING(&t->signal->shared_pending, &t->blocked)) { | 129 | PENDING(&t->signal->shared_pending, &t->blocked)) { |
130 | set_tsk_thread_flag(t, TIF_SIGPENDING); | 130 | set_tsk_thread_flag(t, TIF_SIGPENDING); |
131 | return 1; | 131 | return 1; |
132 | } | 132 | } |
133 | /* | 133 | /* |
134 | * We must never clear the flag in another thread, or in current | 134 | * We must never clear the flag in another thread, or in current |
135 | * when it's possible the current syscall is returning -ERESTART*. | 135 | * when it's possible the current syscall is returning -ERESTART*. |
136 | 	 * So we don't clear it here; only callers that know it is safe do so. | 136 | 	 * So we don't clear it here; only callers that know it is safe do so. |
137 | */ | 137 | */ |
138 | return 0; | 138 | return 0; |
139 | } | 139 | } |
140 | 140 | ||
141 | /* | 141 | /* |
142 | * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up. | 142 | * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up. |
143 | * This is superfluous when called on current, the wakeup is a harmless no-op. | 143 | * This is superfluous when called on current, the wakeup is a harmless no-op. |
144 | */ | 144 | */ |
145 | void recalc_sigpending_and_wake(struct task_struct *t) | 145 | void recalc_sigpending_and_wake(struct task_struct *t) |
146 | { | 146 | { |
147 | if (recalc_sigpending_tsk(t)) | 147 | if (recalc_sigpending_tsk(t)) |
148 | signal_wake_up(t, 0); | 148 | signal_wake_up(t, 0); |
149 | } | 149 | } |
150 | 150 | ||
151 | void recalc_sigpending(void) | 151 | void recalc_sigpending(void) |
152 | { | 152 | { |
153 | if (!recalc_sigpending_tsk(current) && !freezing(current)) | 153 | if (!recalc_sigpending_tsk(current) && !freezing(current)) |
154 | clear_thread_flag(TIF_SIGPENDING); | 154 | clear_thread_flag(TIF_SIGPENDING); |
155 | 155 | ||
156 | } | 156 | } |
157 | 157 | ||
158 | /* Given the mask, find the first available signal that should be serviced. */ | 158 | /* Given the mask, find the first available signal that should be serviced. */ |
159 | 159 | ||
160 | #define SYNCHRONOUS_MASK \ | 160 | #define SYNCHRONOUS_MASK \ |
161 | (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \ | 161 | (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \ |
162 | sigmask(SIGTRAP) | sigmask(SIGFPE)) | 162 | sigmask(SIGTRAP) | sigmask(SIGFPE)) |
163 | 163 | ||
164 | int next_signal(struct sigpending *pending, sigset_t *mask) | 164 | int next_signal(struct sigpending *pending, sigset_t *mask) |
165 | { | 165 | { |
166 | unsigned long i, *s, *m, x; | 166 | unsigned long i, *s, *m, x; |
167 | int sig = 0; | 167 | int sig = 0; |
168 | 168 | ||
169 | s = pending->signal.sig; | 169 | s = pending->signal.sig; |
170 | m = mask->sig; | 170 | m = mask->sig; |
171 | 171 | ||
172 | /* | 172 | /* |
173 | * Handle the first word specially: it contains the | 173 | * Handle the first word specially: it contains the |
174 | * synchronous signals that need to be dequeued first. | 174 | * synchronous signals that need to be dequeued first. |
175 | */ | 175 | */ |
176 | x = *s &~ *m; | 176 | x = *s &~ *m; |
177 | if (x) { | 177 | if (x) { |
178 | if (x & SYNCHRONOUS_MASK) | 178 | if (x & SYNCHRONOUS_MASK) |
179 | x &= SYNCHRONOUS_MASK; | 179 | x &= SYNCHRONOUS_MASK; |
180 | sig = ffz(~x) + 1; | 180 | sig = ffz(~x) + 1; |
181 | return sig; | 181 | return sig; |
182 | } | 182 | } |
183 | 183 | ||
184 | switch (_NSIG_WORDS) { | 184 | switch (_NSIG_WORDS) { |
185 | default: | 185 | default: |
186 | for (i = 1; i < _NSIG_WORDS; ++i) { | 186 | for (i = 1; i < _NSIG_WORDS; ++i) { |
187 | x = *++s &~ *++m; | 187 | x = *++s &~ *++m; |
188 | if (!x) | 188 | if (!x) |
189 | continue; | 189 | continue; |
190 | sig = ffz(~x) + i*_NSIG_BPW + 1; | 190 | sig = ffz(~x) + i*_NSIG_BPW + 1; |
191 | break; | 191 | break; |
192 | } | 192 | } |
193 | break; | 193 | break; |
194 | 194 | ||
195 | case 2: | 195 | case 2: |
196 | x = s[1] &~ m[1]; | 196 | x = s[1] &~ m[1]; |
197 | if (!x) | 197 | if (!x) |
198 | break; | 198 | break; |
199 | sig = ffz(~x) + _NSIG_BPW + 1; | 199 | sig = ffz(~x) + _NSIG_BPW + 1; |
200 | break; | 200 | break; |
201 | 201 | ||
202 | case 1: | 202 | case 1: |
203 | /* Nothing to do */ | 203 | /* Nothing to do */ |
204 | break; | 204 | break; |
205 | } | 205 | } |
206 | 206 | ||
207 | return sig; | 207 | return sig; |
208 | } | 208 | } |
209 | 209 | ||
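next_signal() gives the synchronous fault signals in the first word priority and otherwise takes the lowest-numbered deliverable signal; ffz(~x) + 1 is simply "index of the lowest set bit, 1-based". A userspace model, assuming GCC/Clang __builtin_ctzl() and the usual x86 Linux signal numbers:

    #include <signal.h>
    #include <stdio.h>

    #define BIT(sig)    (1UL << ((sig) - 1))
    #define SYNC_MASK   (BIT(SIGSEGV) | BIT(SIGBUS) | BIT(SIGILL) | \
                         BIT(SIGTRAP) | BIT(SIGFPE))

    static int pick_next(unsigned long pending, unsigned long blocked)
    {
        unsigned long x = pending & ~blocked;
        if (!x)
            return 0;
        if (x & SYNC_MASK)              /* dequeue synchronous faults first */
            x &= SYNC_MASK;
        return __builtin_ctzl(x) + 1;   /* ffz(~x) + 1 */
    }

    int main(void)
    {
        /* SIGHUP (1) and SIGSEGV (11) both pending: SEGV wins anyway. */
        printf("%d\n", pick_next(BIT(SIGHUP) | BIT(SIGSEGV), 0));   /* 11 */
        return 0;
    }
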
210 | static inline void print_dropped_signal(int sig) | 210 | static inline void print_dropped_signal(int sig) |
211 | { | 211 | { |
212 | static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10); | 212 | static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10); |
213 | 213 | ||
214 | if (!print_fatal_signals) | 214 | if (!print_fatal_signals) |
215 | return; | 215 | return; |
216 | 216 | ||
217 | if (!__ratelimit(&ratelimit_state)) | 217 | if (!__ratelimit(&ratelimit_state)) |
218 | return; | 218 | return; |
219 | 219 | ||
220 | printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n", | 220 | printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n", |
221 | current->comm, current->pid, sig); | 221 | current->comm, current->pid, sig); |
222 | } | 222 | } |
223 | 223 | ||
224 | /** | 224 | /** |
225 | * task_set_jobctl_pending - set jobctl pending bits | 225 | * task_set_jobctl_pending - set jobctl pending bits |
226 | * @task: target task | 226 | * @task: target task |
227 | * @mask: pending bits to set | 227 | * @mask: pending bits to set |
228 | * | 228 | * |
229 | 	 * Set @mask in @task->jobctl.  @mask must be a subset of | 229 | 	 * Set @mask in @task->jobctl.  @mask must be a subset of |
230 | * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK | | 230 | * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK | |
231 | * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is | 231 | * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is |
232 | * cleared. If @task is already being killed or exiting, this function | 232 | * cleared. If @task is already being killed or exiting, this function |
233 | 	 * becomes a no-op. | 233 | 	 * becomes a no-op. |
234 | * | 234 | * |
235 | * CONTEXT: | 235 | * CONTEXT: |
236 | * Must be called with @task->sighand->siglock held. | 236 | * Must be called with @task->sighand->siglock held. |
237 | * | 237 | * |
238 | * RETURNS: | 238 | * RETURNS: |
239 | * %true if @mask is set, %false if made noop because @task was dying. | 239 | * %true if @mask is set, %false if made noop because @task was dying. |
240 | */ | 240 | */ |
241 | bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask) | 241 | bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask) |
242 | { | 242 | { |
243 | BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME | | 243 | BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME | |
244 | JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING)); | 244 | JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING)); |
245 | BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK)); | 245 | BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK)); |
246 | 246 | ||
247 | if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING))) | 247 | if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING))) |
248 | return false; | 248 | return false; |
249 | 249 | ||
250 | if (mask & JOBCTL_STOP_SIGMASK) | 250 | if (mask & JOBCTL_STOP_SIGMASK) |
251 | task->jobctl &= ~JOBCTL_STOP_SIGMASK; | 251 | task->jobctl &= ~JOBCTL_STOP_SIGMASK; |
252 | 252 | ||
253 | task->jobctl |= mask; | 253 | task->jobctl |= mask; |
254 | return true; | 254 | return true; |
255 | } | 255 | } |
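
The signo-replacement rule in task_set_jobctl_pending() is easy to miss: setting a new stop signo wipes whatever signo was recorded before. Below is a minimal userspace model of just that rule, not kernel code; the JOBCTL_* values mirror include/linux/sched.h as of this commit, and the locking, the BUG_ON()s and the dying-task check are omitted:

#include <stdbool.h>
#include <stdio.h>

#define JOBCTL_STOP_SIGMASK	0xffff		/* signr of the pending stop */
#define JOBCTL_STOP_PENDING	(1 << 17)	/* task should stop */
#define JOBCTL_STOP_CONSUME	(1 << 18)	/* consume group stop count */

static unsigned int jobctl;

static bool model_set_jobctl_pending(unsigned int mask)
{
	/* a new stop signo replaces any previously recorded one */
	if (mask & JOBCTL_STOP_SIGMASK)
		jobctl &= ~JOBCTL_STOP_SIGMASK;
	jobctl |= mask;
	return true;
}

int main(void)
{
	model_set_jobctl_pending(JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME | 19);
	model_set_jobctl_pending(JOBCTL_STOP_PENDING | 20);	/* replaces 19 */
	printf("pending stop signo: %u\n", jobctl & JOBCTL_STOP_SIGMASK);	/* -> 20 */
	return 0;
}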
256 | 256 | ||
257 | /** | 257 | /** |
258 | * task_clear_jobctl_trapping - clear jobctl trapping bit | 258 | * task_clear_jobctl_trapping - clear jobctl trapping bit |
259 | * @task: target task | 259 | * @task: target task |
260 | * | 260 | * |
261 | * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED. | 261 | * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED. |
262 | * Clear it and wake up the ptracer. Note that we don't need any further | 262 | * Clear it and wake up the ptracer. Note that we don't need any further |
263 | * locking. @task->sighand->siglock guarantees that @task->parent points to the | 263 | * locking. @task->sighand->siglock guarantees that @task->parent points to the |
264 | * ptracer. | 264 | * ptracer. |
265 | * | 265 | * |
266 | * CONTEXT: | 266 | * CONTEXT: |
267 | * Must be called with @task->sighand->siglock held. | 267 | * Must be called with @task->sighand->siglock held. |
268 | */ | 268 | */ |
269 | void task_clear_jobctl_trapping(struct task_struct *task) | 269 | void task_clear_jobctl_trapping(struct task_struct *task) |
270 | { | 270 | { |
271 | if (unlikely(task->jobctl & JOBCTL_TRAPPING)) { | 271 | if (unlikely(task->jobctl & JOBCTL_TRAPPING)) { |
272 | task->jobctl &= ~JOBCTL_TRAPPING; | 272 | task->jobctl &= ~JOBCTL_TRAPPING; |
273 | wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT); | 273 | wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT); |
274 | } | 274 | } |
275 | } | 275 | } |
276 | 276 | ||
277 | /** | 277 | /** |
278 | * task_clear_jobctl_pending - clear jobctl pending bits | 278 | * task_clear_jobctl_pending - clear jobctl pending bits |
279 | * @task: target task | 279 | * @task: target task |
280 | * @mask: pending bits to clear | 280 | * @mask: pending bits to clear |
281 | * | 281 | * |
282 | * Clear @mask from @task->jobctl. @mask must be subset of | 282 | * Clear @mask from @task->jobctl. @mask must be subset of |
283 | * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other | 283 | * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other |
284 | * STOP bits are cleared together. | 284 | * STOP bits are cleared together. |
285 | * | 285 | * |
286 | * If clearing of @mask leaves no stop or trap pending, this function calls | 286 | * If clearing of @mask leaves no stop or trap pending, this function calls |
287 | * task_clear_jobctl_trapping(). | 287 | * task_clear_jobctl_trapping(). |
288 | * | 288 | * |
289 | * CONTEXT: | 289 | * CONTEXT: |
290 | * Must be called with @task->sighand->siglock held. | 290 | * Must be called with @task->sighand->siglock held. |
291 | */ | 291 | */ |
292 | void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask) | 292 | void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask) |
293 | { | 293 | { |
294 | BUG_ON(mask & ~JOBCTL_PENDING_MASK); | 294 | BUG_ON(mask & ~JOBCTL_PENDING_MASK); |
295 | 295 | ||
296 | if (mask & JOBCTL_STOP_PENDING) | 296 | if (mask & JOBCTL_STOP_PENDING) |
297 | mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED; | 297 | mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED; |
298 | 298 | ||
299 | task->jobctl &= ~mask; | 299 | task->jobctl &= ~mask; |
300 | 300 | ||
301 | if (!(task->jobctl & JOBCTL_PENDING_MASK)) | 301 | if (!(task->jobctl & JOBCTL_PENDING_MASK)) |
302 | task_clear_jobctl_trapping(task); | 302 | task_clear_jobctl_trapping(task); |
303 | } | 303 | } |
304 | 304 | ||
305 | /** | 305 | /** |
306 | * task_participate_group_stop - participate in a group stop | 306 | * task_participate_group_stop - participate in a group stop |
307 | * @task: task participating in a group stop | 307 | * @task: task participating in a group stop |
308 | * | 308 | * |
309 | * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop. | 309 | * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop. |
310 | * Group stop states are cleared and the group stop count is consumed if | 310 | * Group stop states are cleared and the group stop count is consumed if |
311 | * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group | 311 | * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group |
312 | * stop, the appropriate %SIGNAL_* flags are set. | 312 | * stop, the appropriate %SIGNAL_* flags are set. |
313 | * | 313 | * |
314 | * CONTEXT: | 314 | * CONTEXT: |
315 | * Must be called with @task->sighand->siglock held. | 315 | * Must be called with @task->sighand->siglock held. |
316 | * | 316 | * |
317 | * RETURNS: | 317 | * RETURNS: |
318 | * %true if group stop completion should be notified to the parent, %false | 318 | * %true if group stop completion should be notified to the parent, %false |
319 | * otherwise. | 319 | * otherwise. |
320 | */ | 320 | */ |
321 | static bool task_participate_group_stop(struct task_struct *task) | 321 | static bool task_participate_group_stop(struct task_struct *task) |
322 | { | 322 | { |
323 | struct signal_struct *sig = task->signal; | 323 | struct signal_struct *sig = task->signal; |
324 | bool consume = task->jobctl & JOBCTL_STOP_CONSUME; | 324 | bool consume = task->jobctl & JOBCTL_STOP_CONSUME; |
325 | 325 | ||
326 | WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING)); | 326 | WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING)); |
327 | 327 | ||
328 | task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING); | 328 | task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING); |
329 | 329 | ||
330 | if (!consume) | 330 | if (!consume) |
331 | return false; | 331 | return false; |
332 | 332 | ||
333 | if (!WARN_ON_ONCE(sig->group_stop_count == 0)) | 333 | if (!WARN_ON_ONCE(sig->group_stop_count == 0)) |
334 | sig->group_stop_count--; | 334 | sig->group_stop_count--; |
335 | 335 | ||
336 | /* | 336 | /* |
337 | * Tell the caller to notify completion iff we are entering into a | 337 | * Tell the caller to notify completion iff we are entering into a |
338 | * fresh group stop. Read comment in do_signal_stop() for details. | 338 | * fresh group stop. Read comment in do_signal_stop() for details. |
339 | */ | 339 | */ |
340 | if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) { | 340 | if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) { |
341 | sig->flags = SIGNAL_STOP_STOPPED; | 341 | sig->flags = SIGNAL_STOP_STOPPED; |
342 | return true; | 342 | return true; |
343 | } | 343 | } |
344 | return false; | 344 | return false; |
345 | } | 345 | } |
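
To make the "fresh group stop" condition concrete, here is a small self-contained model of the consume path, with hypothetical names; siglock and the WARN_ON()s are left out. Only the thread that brings group_stop_count to zero on a not-yet-stopped group reports completion:

#include <stdbool.h>
#include <stdio.h>

struct sig_model {
	int  group_stop_count;
	bool stop_stopped;		/* models SIGNAL_STOP_STOPPED */
};

static bool model_participate(struct sig_model *sig, bool consume)
{
	if (!consume)
		return false;
	if (sig->group_stop_count > 0)
		sig->group_stop_count--;
	/* completion is reported only once, on a fresh group stop */
	if (!sig->group_stop_count && !sig->stop_stopped) {
		sig->stop_stopped = true;
		return true;
	}
	return false;
}

int main(void)
{
	struct sig_model sig = { .group_stop_count = 3 };
	int i;

	for (i = 0; i < 3; i++)
		printf("thread %d notifies parent: %d\n", i,
		       (int)model_participate(&sig, true));	/* 0, 0, 1 */
	return 0;
}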
346 | 346 | ||
347 | /* | 347 | /* |
348 | * allocate a new signal queue record | 348 | * allocate a new signal queue record |
349 | * - this may be called without locks if and only if t == current, otherwise an | 349 | * - this may be called without locks if and only if t == current, otherwise an |
350 | * appropriate lock must be held to stop the target task from exiting | 350 | * appropriate lock must be held to stop the target task from exiting |
351 | */ | 351 | */ |
352 | static struct sigqueue * | 352 | static struct sigqueue * |
353 | __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) | 353 | __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) |
354 | { | 354 | { |
355 | struct sigqueue *q = NULL; | 355 | struct sigqueue *q = NULL; |
356 | struct user_struct *user; | 356 | struct user_struct *user; |
357 | 357 | ||
358 | /* | 358 | /* |
359 | * Protect access to @t credentials. This can go away when all | 359 | * Protect access to @t credentials. This can go away when all |
360 | * callers hold rcu read lock. | 360 | * callers hold rcu read lock. |
361 | */ | 361 | */ |
362 | rcu_read_lock(); | 362 | rcu_read_lock(); |
363 | user = get_uid(__task_cred(t)->user); | 363 | user = get_uid(__task_cred(t)->user); |
364 | atomic_inc(&user->sigpending); | 364 | atomic_inc(&user->sigpending); |
365 | rcu_read_unlock(); | 365 | rcu_read_unlock(); |
366 | 366 | ||
367 | if (override_rlimit || | 367 | if (override_rlimit || |
368 | atomic_read(&user->sigpending) <= | 368 | atomic_read(&user->sigpending) <= |
369 | task_rlimit(t, RLIMIT_SIGPENDING)) { | 369 | task_rlimit(t, RLIMIT_SIGPENDING)) { |
370 | q = kmem_cache_alloc(sigqueue_cachep, flags); | 370 | q = kmem_cache_alloc(sigqueue_cachep, flags); |
371 | } else { | 371 | } else { |
372 | print_dropped_signal(sig); | 372 | print_dropped_signal(sig); |
373 | } | 373 | } |
374 | 374 | ||
375 | if (unlikely(q == NULL)) { | 375 | if (unlikely(q == NULL)) { |
376 | atomic_dec(&user->sigpending); | 376 | atomic_dec(&user->sigpending); |
377 | free_uid(user); | 377 | free_uid(user); |
378 | } else { | 378 | } else { |
379 | INIT_LIST_HEAD(&q->list); | 379 | INIT_LIST_HEAD(&q->list); |
380 | q->flags = 0; | 380 | q->flags = 0; |
381 | q->user = user; | 381 | q->user = user; |
382 | } | 382 | } |
383 | 383 | ||
384 | return q; | 384 | return q; |
385 | } | 385 | } |
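
The sigpending accounting above is what makes userspace sigqueue() fail with EAGAIN once a user holds too many queued signals. A runnable (Linux userspace) way to watch the limit being enforced:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	union sigval val = { .sival_int = 0 };
	struct rlimit rl;
	sigset_t set;
	long queued = 0;

	getrlimit(RLIMIT_SIGPENDING, &rl);
	printf("RLIMIT_SIGPENDING soft limit: %ld\n", (long)rl.rlim_cur);

	/* block SIGRTMIN so every queued instance stays pending */
	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	while (sigqueue(getpid(), SIGRTMIN, val) == 0)
		queued++;

	printf("queued %ld signals, then: %s\n", queued, strerror(errno));
	return 0;
}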
386 | 386 | ||
387 | static void __sigqueue_free(struct sigqueue *q) | 387 | static void __sigqueue_free(struct sigqueue *q) |
388 | { | 388 | { |
389 | if (q->flags & SIGQUEUE_PREALLOC) | 389 | if (q->flags & SIGQUEUE_PREALLOC) |
390 | return; | 390 | return; |
391 | atomic_dec(&q->user->sigpending); | 391 | atomic_dec(&q->user->sigpending); |
392 | free_uid(q->user); | 392 | free_uid(q->user); |
393 | kmem_cache_free(sigqueue_cachep, q); | 393 | kmem_cache_free(sigqueue_cachep, q); |
394 | } | 394 | } |
395 | 395 | ||
396 | void flush_sigqueue(struct sigpending *queue) | 396 | void flush_sigqueue(struct sigpending *queue) |
397 | { | 397 | { |
398 | struct sigqueue *q; | 398 | struct sigqueue *q; |
399 | 399 | ||
400 | sigemptyset(&queue->signal); | 400 | sigemptyset(&queue->signal); |
401 | while (!list_empty(&queue->list)) { | 401 | while (!list_empty(&queue->list)) { |
402 | q = list_entry(queue->list.next, struct sigqueue, list); | 402 | q = list_entry(queue->list.next, struct sigqueue, list); |
403 | list_del_init(&q->list); | 403 | list_del_init(&q->list); |
404 | __sigqueue_free(q); | 404 | __sigqueue_free(q); |
405 | } | 405 | } |
406 | } | 406 | } |
407 | 407 | ||
408 | /* | 408 | /* |
409 | * Flush all pending signals for a task. | 409 | * Flush all pending signals for a task. |
410 | */ | 410 | */ |
411 | void __flush_signals(struct task_struct *t) | 411 | void __flush_signals(struct task_struct *t) |
412 | { | 412 | { |
413 | clear_tsk_thread_flag(t, TIF_SIGPENDING); | 413 | clear_tsk_thread_flag(t, TIF_SIGPENDING); |
414 | flush_sigqueue(&t->pending); | 414 | flush_sigqueue(&t->pending); |
415 | flush_sigqueue(&t->signal->shared_pending); | 415 | flush_sigqueue(&t->signal->shared_pending); |
416 | } | 416 | } |
417 | 417 | ||
418 | void flush_signals(struct task_struct *t) | 418 | void flush_signals(struct task_struct *t) |
419 | { | 419 | { |
420 | unsigned long flags; | 420 | unsigned long flags; |
421 | 421 | ||
422 | spin_lock_irqsave(&t->sighand->siglock, flags); | 422 | spin_lock_irqsave(&t->sighand->siglock, flags); |
423 | __flush_signals(t); | 423 | __flush_signals(t); |
424 | spin_unlock_irqrestore(&t->sighand->siglock, flags); | 424 | spin_unlock_irqrestore(&t->sighand->siglock, flags); |
425 | } | 425 | } |
426 | 426 | ||
427 | static void __flush_itimer_signals(struct sigpending *pending) | 427 | static void __flush_itimer_signals(struct sigpending *pending) |
428 | { | 428 | { |
429 | sigset_t signal, retain; | 429 | sigset_t signal, retain; |
430 | struct sigqueue *q, *n; | 430 | struct sigqueue *q, *n; |
431 | 431 | ||
432 | signal = pending->signal; | 432 | signal = pending->signal; |
433 | sigemptyset(&retain); | 433 | sigemptyset(&retain); |
434 | 434 | ||
435 | list_for_each_entry_safe(q, n, &pending->list, list) { | 435 | list_for_each_entry_safe(q, n, &pending->list, list) { |
436 | int sig = q->info.si_signo; | 436 | int sig = q->info.si_signo; |
437 | 437 | ||
438 | if (likely(q->info.si_code != SI_TIMER)) { | 438 | if (likely(q->info.si_code != SI_TIMER)) { |
439 | sigaddset(&retain, sig); | 439 | sigaddset(&retain, sig); |
440 | } else { | 440 | } else { |
441 | sigdelset(&signal, sig); | 441 | sigdelset(&signal, sig); |
442 | list_del_init(&q->list); | 442 | list_del_init(&q->list); |
443 | __sigqueue_free(q); | 443 | __sigqueue_free(q); |
444 | } | 444 | } |
445 | } | 445 | } |
446 | 446 | ||
447 | sigorsets(&pending->signal, &signal, &retain); | 447 | sigorsets(&pending->signal, &signal, &retain); |
448 | } | 448 | } |
449 | 449 | ||
450 | void flush_itimer_signals(void) | 450 | void flush_itimer_signals(void) |
451 | { | 451 | { |
452 | struct task_struct *tsk = current; | 452 | struct task_struct *tsk = current; |
453 | unsigned long flags; | 453 | unsigned long flags; |
454 | 454 | ||
455 | spin_lock_irqsave(&tsk->sighand->siglock, flags); | 455 | spin_lock_irqsave(&tsk->sighand->siglock, flags); |
456 | __flush_itimer_signals(&tsk->pending); | 456 | __flush_itimer_signals(&tsk->pending); |
457 | __flush_itimer_signals(&tsk->signal->shared_pending); | 457 | __flush_itimer_signals(&tsk->signal->shared_pending); |
458 | spin_unlock_irqrestore(&tsk->sighand->siglock, flags); | 458 | spin_unlock_irqrestore(&tsk->sighand->siglock, flags); |
459 | } | 459 | } |
460 | 460 | ||
461 | void ignore_signals(struct task_struct *t) | 461 | void ignore_signals(struct task_struct *t) |
462 | { | 462 | { |
463 | int i; | 463 | int i; |
464 | 464 | ||
465 | for (i = 0; i < _NSIG; ++i) | 465 | for (i = 0; i < _NSIG; ++i) |
466 | t->sighand->action[i].sa.sa_handler = SIG_IGN; | 466 | t->sighand->action[i].sa.sa_handler = SIG_IGN; |
467 | 467 | ||
468 | flush_signals(t); | 468 | flush_signals(t); |
469 | } | 469 | } |
470 | 470 | ||
471 | /* | 471 | /* |
472 | * Flush all handlers for a task. | 472 | * Flush all handlers for a task. |
473 | */ | 473 | */ |
474 | 474 | ||
475 | void | 475 | void |
476 | flush_signal_handlers(struct task_struct *t, int force_default) | 476 | flush_signal_handlers(struct task_struct *t, int force_default) |
477 | { | 477 | { |
478 | int i; | 478 | int i; |
479 | struct k_sigaction *ka = &t->sighand->action[0]; | 479 | struct k_sigaction *ka = &t->sighand->action[0]; |
480 | for (i = _NSIG ; i != 0 ; i--) { | 480 | for (i = _NSIG ; i != 0 ; i--) { |
481 | if (force_default || ka->sa.sa_handler != SIG_IGN) | 481 | if (force_default || ka->sa.sa_handler != SIG_IGN) |
482 | ka->sa.sa_handler = SIG_DFL; | 482 | ka->sa.sa_handler = SIG_DFL; |
483 | ka->sa.sa_flags = 0; | 483 | ka->sa.sa_flags = 0; |
484 | sigemptyset(&ka->sa.sa_mask); | 484 | sigemptyset(&ka->sa.sa_mask); |
485 | ka++; | 485 | ka++; |
486 | } | 486 | } |
487 | } | 487 | } |
488 | 488 | ||
489 | int unhandled_signal(struct task_struct *tsk, int sig) | 489 | int unhandled_signal(struct task_struct *tsk, int sig) |
490 | { | 490 | { |
491 | void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler; | 491 | void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler; |
492 | if (is_global_init(tsk)) | 492 | if (is_global_init(tsk)) |
493 | return 1; | 493 | return 1; |
494 | if (handler != SIG_IGN && handler != SIG_DFL) | 494 | if (handler != SIG_IGN && handler != SIG_DFL) |
495 | return 0; | 495 | return 0; |
496 | /* if ptraced, let the tracer determine */ | 496 | /* if ptraced, let the tracer determine */ |
497 | return !tsk->ptrace; | 497 | return !tsk->ptrace; |
498 | } | 498 | } |
499 | 499 | ||
500 | /* | 500 | /* |
501 | * Notify the system that a driver wants to block all signals for this | 501 | * Notify the system that a driver wants to block all signals for this |
502 | * process, and wants to be notified if any signals at all were to be | 502 | * process, and wants to be notified if any signals at all were to be |
503 | * sent/acted upon. If the notifier routine returns non-zero, then the | 503 | * sent/acted upon. If the notifier routine returns non-zero, then the |
504 | * signal will be acted upon after all. If the notifier routine returns 0, | 504 | * signal will be acted upon after all. If the notifier routine returns 0, |
505 | * then the signal will be blocked. Only one block per process is | 505 | * then the signal will be blocked. Only one block per process is |
506 | * allowed. priv is a pointer to private data that the notifier routine | 506 | * allowed. priv is a pointer to private data that the notifier routine |
507 | * can use to determine if the signal should be blocked or not. | 507 | * can use to determine if the signal should be blocked or not. |
508 | */ | 508 | */ |
509 | void | 509 | void |
510 | block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask) | 510 | block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask) |
511 | { | 511 | { |
512 | unsigned long flags; | 512 | unsigned long flags; |
513 | 513 | ||
514 | spin_lock_irqsave(¤t->sighand->siglock, flags); | 514 | spin_lock_irqsave(¤t->sighand->siglock, flags); |
515 | current->notifier_mask = mask; | 515 | current->notifier_mask = mask; |
516 | current->notifier_data = priv; | 516 | current->notifier_data = priv; |
517 | current->notifier = notifier; | 517 | current->notifier = notifier; |
518 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); | 518 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); |
519 | } | 519 | } |
520 | 520 | ||
521 | /* Notify the system that blocking has ended. */ | 521 | /* Notify the system that blocking has ended. */ |
522 | 522 | ||
523 | void | 523 | void |
524 | unblock_all_signals(void) | 524 | unblock_all_signals(void) |
525 | { | 525 | { |
526 | unsigned long flags; | 526 | unsigned long flags; |
527 | 527 | ||
528 | spin_lock_irqsave(¤t->sighand->siglock, flags); | 528 | spin_lock_irqsave(¤t->sighand->siglock, flags); |
529 | current->notifier = NULL; | 529 | current->notifier = NULL; |
530 | current->notifier_data = NULL; | 530 | current->notifier_data = NULL; |
531 | recalc_sigpending(); | 531 | recalc_sigpending(); |
532 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); | 532 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); |
533 | } | 533 | } |
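
A sketch of how a driver would use this notifier pair; my_dev, my_notifier and the lock functions are hypothetical names (historically the DRM hardware-lock code was the notable caller of block_all_signals()):

struct my_dev {
	int hw_lock_held;
};

/* return nonzero to let the signal be acted upon, 0 to keep it blocked */
static int my_notifier(void *priv)
{
	struct my_dev *dev = priv;

	return !dev->hw_lock_held;
}

static void my_dev_lock_hw(struct my_dev *dev)
{
	sigset_t allsigs;

	sigfillset(&allsigs);
	dev->hw_lock_held = 1;
	block_all_signals(my_notifier, dev, &allsigs);
}

static void my_dev_unlock_hw(struct my_dev *dev)
{
	dev->hw_lock_held = 0;
	unblock_all_signals();
}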
534 | 534 | ||
535 | static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) | 535 | static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) |
536 | { | 536 | { |
537 | struct sigqueue *q, *first = NULL; | 537 | struct sigqueue *q, *first = NULL; |
538 | 538 | ||
539 | /* | 539 | /* |
540 | * Collect the siginfo appropriate to this signal. Check if | 540 | * Collect the siginfo appropriate to this signal. Check if |
541 | * there is another siginfo for the same signal. | 541 | * there is another siginfo for the same signal. |
542 | */ | 542 | */ |
543 | list_for_each_entry(q, &list->list, list) { | 543 | list_for_each_entry(q, &list->list, list) { |
544 | if (q->info.si_signo == sig) { | 544 | if (q->info.si_signo == sig) { |
545 | if (first) | 545 | if (first) |
546 | goto still_pending; | 546 | goto still_pending; |
547 | first = q; | 547 | first = q; |
548 | } | 548 | } |
549 | } | 549 | } |
550 | 550 | ||
551 | sigdelset(&list->signal, sig); | 551 | sigdelset(&list->signal, sig); |
552 | 552 | ||
553 | if (first) { | 553 | if (first) { |
554 | still_pending: | 554 | still_pending: |
555 | list_del_init(&first->list); | 555 | list_del_init(&first->list); |
556 | copy_siginfo(info, &first->info); | 556 | copy_siginfo(info, &first->info); |
557 | __sigqueue_free(first); | 557 | __sigqueue_free(first); |
558 | } else { | 558 | } else { |
559 | /* | 559 | /* |
560 | * Ok, it wasn't in the queue. This must be | 560 | * Ok, it wasn't in the queue. This must be |
561 | * a fast-pathed signal or we must have been | 561 | * a fast-pathed signal or we must have been |
562 | * out of queue space. So zero out the info. | 562 | * out of queue space. So zero out the info. |
563 | */ | 563 | */ |
564 | info->si_signo = sig; | 564 | info->si_signo = sig; |
565 | info->si_errno = 0; | 565 | info->si_errno = 0; |
566 | info->si_code = SI_USER; | 566 | info->si_code = SI_USER; |
567 | info->si_pid = 0; | 567 | info->si_pid = 0; |
568 | info->si_uid = 0; | 568 | info->si_uid = 0; |
569 | } | 569 | } |
570 | } | 570 | } |
571 | 571 | ||
572 | static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, | 572 | static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, |
573 | siginfo_t *info) | 573 | siginfo_t *info) |
574 | { | 574 | { |
575 | int sig = next_signal(pending, mask); | 575 | int sig = next_signal(pending, mask); |
576 | 576 | ||
577 | if (sig) { | 577 | if (sig) { |
578 | if (current->notifier) { | 578 | if (current->notifier) { |
579 | if (sigismember(current->notifier_mask, sig)) { | 579 | if (sigismember(current->notifier_mask, sig)) { |
580 | if (!(current->notifier)(current->notifier_data)) { | 580 | if (!(current->notifier)(current->notifier_data)) { |
581 | clear_thread_flag(TIF_SIGPENDING); | 581 | clear_thread_flag(TIF_SIGPENDING); |
582 | return 0; | 582 | return 0; |
583 | } | 583 | } |
584 | } | 584 | } |
585 | } | 585 | } |
586 | 586 | ||
587 | collect_signal(sig, pending, info); | 587 | collect_signal(sig, pending, info); |
588 | } | 588 | } |
589 | 589 | ||
590 | return sig; | 590 | return sig; |
591 | } | 591 | } |
592 | 592 | ||
593 | /* | 593 | /* |
594 | * Dequeue a signal and return the element to the caller, which is | 594 | * Dequeue a signal and return the element to the caller, which is |
595 | * expected to free it. | 595 | * expected to free it. |
596 | * | 596 | * |
597 | * All callers have to hold the siglock. | 597 | * All callers have to hold the siglock. |
598 | */ | 598 | */ |
599 | int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) | 599 | int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) |
600 | { | 600 | { |
601 | int signr; | 601 | int signr; |
602 | 602 | ||
603 | /* We only dequeue private signals from ourselves; we don't let | 603 | /* We only dequeue private signals from ourselves; we don't let |
604 | * signalfd steal them | 604 | * signalfd steal them |
605 | */ | 605 | */ |
606 | signr = __dequeue_signal(&tsk->pending, mask, info); | 606 | signr = __dequeue_signal(&tsk->pending, mask, info); |
607 | if (!signr) { | 607 | if (!signr) { |
608 | signr = __dequeue_signal(&tsk->signal->shared_pending, | 608 | signr = __dequeue_signal(&tsk->signal->shared_pending, |
609 | mask, info); | 609 | mask, info); |
610 | /* | 610 | /* |
611 | * itimer signal? | 611 | * itimer signal? |
612 | * | 612 | * |
613 | * itimers are process shared and we restart periodic | 613 | * itimers are process shared and we restart periodic |
614 | * itimers in the signal delivery path to prevent DoS | 614 | * itimers in the signal delivery path to prevent DoS |
615 | * attacks in the high resolution timer case. This is | 615 | * attacks in the high resolution timer case. This is |
616 | * compliant with the old way of self-restarting | 616 | * compliant with the old way of self-restarting |
617 | * itimers, as the SIGALRM is a legacy signal and only | 617 | * itimers, as the SIGALRM is a legacy signal and only |
618 | * queued once. Changing the restart behaviour to | 618 | * queued once. Changing the restart behaviour to |
619 | * restart the timer in the signal dequeue path | 619 | * restart the timer in the signal dequeue path |
620 | * instead also reduces the timer noise on heavily | 620 | * instead also reduces the timer noise on heavily |
621 | * loaded !highres systems. | 621 | * loaded !highres systems. |
622 | */ | 622 | */ |
623 | if (unlikely(signr == SIGALRM)) { | 623 | if (unlikely(signr == SIGALRM)) { |
624 | struct hrtimer *tmr = &tsk->signal->real_timer; | 624 | struct hrtimer *tmr = &tsk->signal->real_timer; |
625 | 625 | ||
626 | if (!hrtimer_is_queued(tmr) && | 626 | if (!hrtimer_is_queued(tmr) && |
627 | tsk->signal->it_real_incr.tv64 != 0) { | 627 | tsk->signal->it_real_incr.tv64 != 0) { |
628 | hrtimer_forward(tmr, tmr->base->get_time(), | 628 | hrtimer_forward(tmr, tmr->base->get_time(), |
629 | tsk->signal->it_real_incr); | 629 | tsk->signal->it_real_incr); |
630 | hrtimer_restart(tmr); | 630 | hrtimer_restart(tmr); |
631 | } | 631 | } |
632 | } | 632 | } |
633 | } | 633 | } |
634 | 634 | ||
635 | recalc_sigpending(); | 635 | recalc_sigpending(); |
636 | if (!signr) | 636 | if (!signr) |
637 | return 0; | 637 | return 0; |
638 | 638 | ||
639 | if (unlikely(sig_kernel_stop(signr))) { | 639 | if (unlikely(sig_kernel_stop(signr))) { |
640 | /* | 640 | /* |
641 | * Set a marker that we have dequeued a stop signal. Our | 641 | * Set a marker that we have dequeued a stop signal. Our |
642 | * caller might release the siglock and then the pending | 642 | * caller might release the siglock and then the pending |
643 | * stop signal it is about to process is no longer in the | 643 | * stop signal it is about to process is no longer in the |
644 | * pending bitmasks, but must still be cleared by a SIGCONT | 644 | * pending bitmasks, but must still be cleared by a SIGCONT |
645 | * (and overruled by a SIGKILL). So those cases clear this | 645 | * (and overruled by a SIGKILL). So those cases clear this |
646 | * shared flag after we've set it. Note that this flag may | 646 | * shared flag after we've set it. Note that this flag may |
647 | * remain set after the signal we return is ignored or | 647 | * remain set after the signal we return is ignored or |
648 | * handled. That doesn't matter because its only purpose | 648 | * handled. That doesn't matter because its only purpose |
649 | * is to alert stop-signal processing code when another | 649 | * is to alert stop-signal processing code when another |
650 | * processor has come along and cleared the flag. | 650 | * processor has come along and cleared the flag. |
651 | */ | 651 | */ |
652 | current->jobctl |= JOBCTL_STOP_DEQUEUED; | 652 | current->jobctl |= JOBCTL_STOP_DEQUEUED; |
653 | } | 653 | } |
654 | if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { | 654 | if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { |
655 | /* | 655 | /* |
656 | * Release the siglock to ensure proper locking order | 656 | * Release the siglock to ensure proper locking order |
657 | * of timer locks outside of siglocks. Note, we leave | 657 | * of timer locks outside of siglocks. Note, we leave |
658 | * irqs disabled here, since the posix-timers code is | 658 | * irqs disabled here, since the posix-timers code is |
659 | * about to disable them again anyway. | 659 | * about to disable them again anyway. |
660 | */ | 660 | */ |
661 | spin_unlock(&tsk->sighand->siglock); | 661 | spin_unlock(&tsk->sighand->siglock); |
662 | do_schedule_next_timer(info); | 662 | do_schedule_next_timer(info); |
663 | spin_lock(&tsk->sighand->siglock); | 663 | spin_lock(&tsk->sighand->siglock); |
664 | } | 664 | } |
665 | return signr; | 665 | return signr; |
666 | } | 666 | } |
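
The SIGALRM special case is user-visible: a periodic itimer may expire many times while SIGALRM is blocked, yet only one instance stays pending, and per the code above the timer is re-armed when the signal is dequeued. A runnable userspace illustration:

#include <signal.h>
#include <stdio.h>
#include <sys/time.h>
#include <time.h>

int main(void)
{
	struct itimerval it = {
		.it_interval = { .tv_sec = 0, .tv_usec = 10000 },
		.it_value    = { .tv_sec = 0, .tv_usec = 10000 },
	};
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	sigset_t set;
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGALRM);
	sigprocmask(SIG_BLOCK, &set, NULL);

	setitimer(ITIMER_REAL, &it, NULL);
	nanosleep(&ts, NULL);		/* ~100 expirations accumulate... */

	sigwait(&set, &sig);		/* ...but only one SIGALRM is pending */
	printf("dequeued signal %d\n", sig);
	return 0;
}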
667 | 667 | ||
668 | /* | 668 | /* |
669 | * Tell a process that it has a new active signal. | 669 | * Tell a process that it has a new active signal. |
670 | * | 670 | * |
671 | * NOTE! we rely on the previous spin_lock to | 671 | * NOTE! we rely on the previous spin_lock to |
672 | * lock interrupts for us! We can only be called with | 672 | * lock interrupts for us! We can only be called with |
673 | * "siglock" held, and the local interrupt must | 673 | * "siglock" held, and the local interrupt must |
674 | * have been disabled when that got acquired! | 674 | * have been disabled when that got acquired! |
675 | * | 675 | * |
676 | * No need to set need_resched since signal event passing | 676 | * No need to set need_resched since signal event passing |
677 | * goes through ->blocked | 677 | * goes through ->blocked |
678 | */ | 678 | */ |
679 | void signal_wake_up(struct task_struct *t, int resume) | 679 | void signal_wake_up(struct task_struct *t, int resume) |
680 | { | 680 | { |
681 | unsigned int mask; | 681 | unsigned int mask; |
682 | 682 | ||
683 | set_tsk_thread_flag(t, TIF_SIGPENDING); | 683 | set_tsk_thread_flag(t, TIF_SIGPENDING); |
684 | 684 | ||
685 | /* | 685 | /* |
686 | * For SIGKILL, we want to wake it up in the stopped/traced/killable | 686 | * For SIGKILL, we want to wake it up in the stopped/traced/killable |
687 | * case. We don't check t->state here because there is a race with it | 687 | * case. We don't check t->state here because there is a race with it |
688 | * executing on another processor and just now entering stopped state. | 688 | * executing on another processor and just now entering stopped state. |
689 | * By using wake_up_state, we ensure the process will wake up and | 689 | * By using wake_up_state, we ensure the process will wake up and |
690 | * handle its death signal. | 690 | * handle its death signal. |
691 | */ | 691 | */ |
692 | mask = TASK_INTERRUPTIBLE; | 692 | mask = TASK_INTERRUPTIBLE; |
693 | if (resume) | 693 | if (resume) |
694 | mask |= TASK_WAKEKILL; | 694 | mask |= TASK_WAKEKILL; |
695 | if (!wake_up_state(t, mask)) | 695 | if (!wake_up_state(t, mask)) |
696 | kick_process(t); | 696 | kick_process(t); |
697 | } | 697 | } |
698 | 698 | ||
699 | /* | 699 | /* |
700 | * Remove signals in mask from the pending set and queue. | 700 | * Remove signals in mask from the pending set and queue. |
701 | * Returns 1 if any signals were found. | 701 | * Returns 1 if any signals were found. |
702 | * | 702 | * |
703 | * All callers must be holding the siglock. | 703 | * All callers must be holding the siglock. |
704 | * | 704 | * |
705 | * This version takes a sigset mask and looks at all signals, | 705 | * This version takes a sigset mask and looks at all signals, |
706 | * not just those in the first mask word. | 706 | * not just those in the first mask word. |
707 | */ | 707 | */ |
708 | static int rm_from_queue_full(sigset_t *mask, struct sigpending *s) | 708 | static int rm_from_queue_full(sigset_t *mask, struct sigpending *s) |
709 | { | 709 | { |
710 | struct sigqueue *q, *n; | 710 | struct sigqueue *q, *n; |
711 | sigset_t m; | 711 | sigset_t m; |
712 | 712 | ||
713 | sigandsets(&m, mask, &s->signal); | 713 | sigandsets(&m, mask, &s->signal); |
714 | if (sigisemptyset(&m)) | 714 | if (sigisemptyset(&m)) |
715 | return 0; | 715 | return 0; |
716 | 716 | ||
717 | sigandnsets(&s->signal, &s->signal, mask); | 717 | sigandnsets(&s->signal, &s->signal, mask); |
718 | list_for_each_entry_safe(q, n, &s->list, list) { | 718 | list_for_each_entry_safe(q, n, &s->list, list) { |
719 | if (sigismember(mask, q->info.si_signo)) { | 719 | if (sigismember(mask, q->info.si_signo)) { |
720 | list_del_init(&q->list); | 720 | list_del_init(&q->list); |
721 | __sigqueue_free(q); | 721 | __sigqueue_free(q); |
722 | } | 722 | } |
723 | } | 723 | } |
724 | return 1; | 724 | return 1; |
725 | } | 725 | } |
726 | /* | 726 | /* |
727 | * Remove signals in mask from the pending set and queue. | 727 | * Remove signals in mask from the pending set and queue. |
728 | * Returns 1 if any signals were found. | 728 | * Returns 1 if any signals were found. |
729 | * | 729 | * |
730 | * All callers must be holding the siglock. | 730 | * All callers must be holding the siglock. |
731 | */ | 731 | */ |
732 | static int rm_from_queue(unsigned long mask, struct sigpending *s) | 732 | static int rm_from_queue(unsigned long mask, struct sigpending *s) |
733 | { | 733 | { |
734 | struct sigqueue *q, *n; | 734 | struct sigqueue *q, *n; |
735 | 735 | ||
736 | if (!sigtestsetmask(&s->signal, mask)) | 736 | if (!sigtestsetmask(&s->signal, mask)) |
737 | return 0; | 737 | return 0; |
738 | 738 | ||
739 | sigdelsetmask(&s->signal, mask); | 739 | sigdelsetmask(&s->signal, mask); |
740 | list_for_each_entry_safe(q, n, &s->list, list) { | 740 | list_for_each_entry_safe(q, n, &s->list, list) { |
741 | if (q->info.si_signo < SIGRTMIN && | 741 | if (q->info.si_signo < SIGRTMIN && |
742 | (mask & sigmask(q->info.si_signo))) { | 742 | (mask & sigmask(q->info.si_signo))) { |
743 | list_del_init(&q->list); | 743 | list_del_init(&q->list); |
744 | __sigqueue_free(q); | 744 | __sigqueue_free(q); |
745 | } | 745 | } |
746 | } | 746 | } |
747 | return 1; | 747 | return 1; |
748 | } | 748 | } |
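
Since sigtestsetmask()/sigdelsetmask() work on the first word of the set, the core of rm_from_queue() reduces to plain bit operations for sig < 32. A tiny model, assuming the x86 numbering where SIG_KERNEL_STOP_MASK covers signals 19-22 (SIGSTOP, SIGTSTP, SIGTTIN, SIGTTOU):

#include <stdio.h>

#define sigmask(sig)	(1UL << ((sig) - 1))

int main(void)
{
	/* pending: SIGUSR1 (10), SIGSTOP (19), SIGTSTP (20) */
	unsigned long pending = sigmask(10) | sigmask(19) | sigmask(20);
	unsigned long stop_mask =
		sigmask(19) | sigmask(20) | sigmask(21) | sigmask(22);

	if (pending & stop_mask) {		/* sigtestsetmask() */
		pending &= ~stop_mask;		/* sigdelsetmask() */
		printf("stop signals removed, pending now %#lx\n", pending);
	}
	return 0;
}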
749 | 749 | ||
750 | static inline int is_si_special(const struct siginfo *info) | 750 | static inline int is_si_special(const struct siginfo *info) |
751 | { | 751 | { |
752 | return info <= SEND_SIG_FORCED; | 752 | return info <= SEND_SIG_FORCED; |
753 | } | 753 | } |
754 | 754 | ||
755 | static inline bool si_fromuser(const struct siginfo *info) | 755 | static inline bool si_fromuser(const struct siginfo *info) |
756 | { | 756 | { |
757 | return info == SEND_SIG_NOINFO || | 757 | return info == SEND_SIG_NOINFO || |
758 | (!is_si_special(info) && SI_FROMUSER(info)); | 758 | (!is_si_special(info) && SI_FROMUSER(info)); |
759 | } | 759 | } |
760 | 760 | ||
761 | /* | 761 | /* |
762 | * called with RCU read lock from check_kill_permission() | 762 | * called with RCU read lock from check_kill_permission() |
763 | */ | 763 | */ |
764 | static int kill_ok_by_cred(struct task_struct *t) | 764 | static int kill_ok_by_cred(struct task_struct *t) |
765 | { | 765 | { |
766 | const struct cred *cred = current_cred(); | 766 | const struct cred *cred = current_cred(); |
767 | const struct cred *tcred = __task_cred(t); | 767 | const struct cred *tcred = __task_cred(t); |
768 | 768 | ||
769 | if (cred->user->user_ns == tcred->user->user_ns && | 769 | if (cred->user->user_ns == tcred->user->user_ns && |
770 | (cred->euid == tcred->suid || | 770 | (cred->euid == tcred->suid || |
771 | cred->euid == tcred->uid || | 771 | cred->euid == tcred->uid || |
772 | cred->uid == tcred->suid || | 772 | cred->uid == tcred->suid || |
773 | cred->uid == tcred->uid)) | 773 | cred->uid == tcred->uid)) |
774 | return 1; | 774 | return 1; |
775 | 775 | ||
776 | if (ns_capable(tcred->user->user_ns, CAP_KILL)) | 776 | if (ns_capable(tcred->user->user_ns, CAP_KILL)) |
777 | return 1; | 777 | return 1; |
778 | 778 | ||
779 | return 0; | 779 | return 0; |
780 | } | 780 | } |
781 | 781 | ||
782 | /* | 782 | /* |
783 | * Bad permissions for sending the signal | 783 | * Bad permissions for sending the signal |
784 | * - the caller must hold the RCU read lock | 784 | * - the caller must hold the RCU read lock |
785 | */ | 785 | */ |
786 | static int check_kill_permission(int sig, struct siginfo *info, | 786 | static int check_kill_permission(int sig, struct siginfo *info, |
787 | struct task_struct *t) | 787 | struct task_struct *t) |
788 | { | 788 | { |
789 | struct pid *sid; | 789 | struct pid *sid; |
790 | int error; | 790 | int error; |
791 | 791 | ||
792 | if (!valid_signal(sig)) | 792 | if (!valid_signal(sig)) |
793 | return -EINVAL; | 793 | return -EINVAL; |
794 | 794 | ||
795 | if (!si_fromuser(info)) | 795 | if (!si_fromuser(info)) |
796 | return 0; | 796 | return 0; |
797 | 797 | ||
798 | error = audit_signal_info(sig, t); /* Let audit system see the signal */ | 798 | error = audit_signal_info(sig, t); /* Let audit system see the signal */ |
799 | if (error) | 799 | if (error) |
800 | return error; | 800 | return error; |
801 | 801 | ||
802 | if (!same_thread_group(current, t) && | 802 | if (!same_thread_group(current, t) && |
803 | !kill_ok_by_cred(t)) { | 803 | !kill_ok_by_cred(t)) { |
804 | switch (sig) { | 804 | switch (sig) { |
805 | case SIGCONT: | 805 | case SIGCONT: |
806 | sid = task_session(t); | 806 | sid = task_session(t); |
807 | /* | 807 | /* |
808 | * We don't return the error if sid == NULL. The | 808 | * We don't return the error if sid == NULL. The |
809 | * task was unhashed; the caller must notice this. | 809 | * task was unhashed; the caller must notice this. |
810 | */ | 810 | */ |
811 | if (!sid || sid == task_session(current)) | 811 | if (!sid || sid == task_session(current)) |
812 | break; | 812 | break; |
813 | default: | 813 | default: |
814 | return -EPERM; | 814 | return -EPERM; |
815 | } | 815 | } |
816 | } | 816 | } |
817 | 817 | ||
818 | return security_task_kill(t, info, sig, 0); | 818 | return security_task_kill(t, info, sig, 0); |
819 | } | 819 | } |
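
These are the same checks a plain kill() from userspace runs into; signal 0 exercises the permission check without delivering anything, which gives a cheap probe:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	pid_t pid = 1;	/* init: EPERM for ordinary users, if it exists */

	if (kill(pid, 0) == 0)
		printf("allowed to signal pid %d\n", (int)pid);
	else if (errno == EPERM)
		printf("pid %d exists, but we may not signal it\n", (int)pid);
	else if (errno == ESRCH)
		printf("no such process: pid %d\n", (int)pid);
	return 0;
}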
820 | 820 | ||
821 | /** | 821 | /** |
822 | * ptrace_trap_notify - schedule trap to notify ptracer | 822 | * ptrace_trap_notify - schedule trap to notify ptracer |
823 | * @t: tracee wanting to notify tracer | 823 | * @t: tracee wanting to notify tracer |
824 | * | 824 | * |
825 | * This function schedules sticky ptrace trap which is cleared on the next | 825 | * This function schedules sticky ptrace trap which is cleared on the next |
826 | * TRAP_STOP to notify ptracer of an event. @t must have been seized by | 826 | * TRAP_STOP to notify ptracer of an event. @t must have been seized by |
827 | * ptracer. | 827 | * ptracer. |
828 | * | 828 | * |
829 | * If @t is running, STOP trap will be taken. If trapped for STOP and | 829 | * If @t is running, STOP trap will be taken. If trapped for STOP and |
830 | * ptracer is listening for events, tracee is woken up so that it can | 830 | * ptracer is listening for events, tracee is woken up so that it can |
831 | * re-trap for the new event. If trapped otherwise, STOP trap will be | 831 | * re-trap for the new event. If trapped otherwise, STOP trap will be |
832 | * eventually taken without returning to userland after the existing traps | 832 | * eventually taken without returning to userland after the existing traps |
833 | * are finished by PTRACE_CONT. | 833 | * are finished by PTRACE_CONT. |
834 | * | 834 | * |
835 | * CONTEXT: | 835 | * CONTEXT: |
836 | * Must be called with @t->sighand->siglock held. | 836 | * Must be called with @t->sighand->siglock held. |
837 | */ | 837 | */ |
838 | static void ptrace_trap_notify(struct task_struct *t) | 838 | static void ptrace_trap_notify(struct task_struct *t) |
839 | { | 839 | { |
840 | WARN_ON_ONCE(!(t->ptrace & PT_SEIZED)); | 840 | WARN_ON_ONCE(!(t->ptrace & PT_SEIZED)); |
841 | assert_spin_locked(&t->sighand->siglock); | 841 | assert_spin_locked(&t->sighand->siglock); |
842 | 842 | ||
843 | task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); | 843 | task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); |
844 | signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); | 844 | signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); |
845 | } | 845 | } |
846 | 846 | ||
847 | /* | 847 | /* |
848 | * Handle magic process-wide effects of stop/continue signals. Unlike | 848 | * Handle magic process-wide effects of stop/continue signals. Unlike |
849 | * the signal actions, these happen immediately at signal-generation | 849 | * the signal actions, these happen immediately at signal-generation |
850 | * time regardless of blocking, ignoring, or handling. This does the | 850 | * time regardless of blocking, ignoring, or handling. This does the |
851 | * actual continuing for SIGCONT, but not the actual stopping for stop | 851 | * actual continuing for SIGCONT, but not the actual stopping for stop |
852 | * signals. The process stop is done as a signal action for SIG_DFL. | 852 | * signals. The process stop is done as a signal action for SIG_DFL. |
853 | * | 853 | * |
854 | * Returns true if the signal should be actually delivered, otherwise | 854 | * Returns true if the signal should be actually delivered, otherwise |
855 | * it should be dropped. | 855 | * it should be dropped. |
856 | */ | 856 | */ |
857 | static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns) | 857 | static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns) |
858 | { | 858 | { |
859 | struct signal_struct *signal = p->signal; | 859 | struct signal_struct *signal = p->signal; |
860 | struct task_struct *t; | 860 | struct task_struct *t; |
861 | 861 | ||
862 | if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) { | 862 | if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) { |
863 | /* | 863 | /* |
864 | * The process is in the middle of dying, nothing to do. | 864 | * The process is in the middle of dying, nothing to do. |
865 | */ | 865 | */ |
866 | } else if (sig_kernel_stop(sig)) { | 866 | } else if (sig_kernel_stop(sig)) { |
867 | /* | 867 | /* |
868 | * This is a stop signal. Remove SIGCONT from all queues. | 868 | * This is a stop signal. Remove SIGCONT from all queues. |
869 | */ | 869 | */ |
870 | rm_from_queue(sigmask(SIGCONT), &signal->shared_pending); | 870 | rm_from_queue(sigmask(SIGCONT), &signal->shared_pending); |
871 | t = p; | 871 | t = p; |
872 | do { | 872 | do { |
873 | rm_from_queue(sigmask(SIGCONT), &t->pending); | 873 | rm_from_queue(sigmask(SIGCONT), &t->pending); |
874 | } while_each_thread(p, t); | 874 | } while_each_thread(p, t); |
875 | } else if (sig == SIGCONT) { | 875 | } else if (sig == SIGCONT) { |
876 | unsigned int why; | 876 | unsigned int why; |
877 | /* | 877 | /* |
878 | * Remove all stop signals from all queues, wake all threads. | 878 | * Remove all stop signals from all queues, wake all threads. |
879 | */ | 879 | */ |
880 | rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending); | 880 | rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending); |
881 | t = p; | 881 | t = p; |
882 | do { | 882 | do { |
883 | task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING); | 883 | task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING); |
884 | rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); | 884 | rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); |
885 | if (likely(!(t->ptrace & PT_SEIZED))) | 885 | if (likely(!(t->ptrace & PT_SEIZED))) |
886 | wake_up_state(t, __TASK_STOPPED); | 886 | wake_up_state(t, __TASK_STOPPED); |
887 | else | 887 | else |
888 | ptrace_trap_notify(t); | 888 | ptrace_trap_notify(t); |
889 | } while_each_thread(p, t); | 889 | } while_each_thread(p, t); |
890 | 890 | ||
891 | /* | 891 | /* |
892 | * Notify the parent with CLD_CONTINUED if we were stopped. | 892 | * Notify the parent with CLD_CONTINUED if we were stopped. |
893 | * | 893 | * |
894 | * If we were in the middle of a group stop, we pretend it | 894 | * If we were in the middle of a group stop, we pretend it |
895 | * was already finished, and then continued. Since SIGCHLD | 895 | * was already finished, and then continued. Since SIGCHLD |
896 | * doesn't queue we report only CLD_STOPPED, as if the next | 896 | * doesn't queue we report only CLD_STOPPED, as if the next |
897 | * CLD_CONTINUED was dropped. | 897 | * CLD_CONTINUED was dropped. |
898 | */ | 898 | */ |
899 | why = 0; | 899 | why = 0; |
900 | if (signal->flags & SIGNAL_STOP_STOPPED) | 900 | if (signal->flags & SIGNAL_STOP_STOPPED) |
901 | why |= SIGNAL_CLD_CONTINUED; | 901 | why |= SIGNAL_CLD_CONTINUED; |
902 | else if (signal->group_stop_count) | 902 | else if (signal->group_stop_count) |
903 | why |= SIGNAL_CLD_STOPPED; | 903 | why |= SIGNAL_CLD_STOPPED; |
904 | 904 | ||
905 | if (why) { | 905 | if (why) { |
906 | /* | 906 | /* |
907 | * The first thread which returns from do_signal_stop() | 907 | * The first thread which returns from do_signal_stop() |
908 | * will take ->siglock, notice SIGNAL_CLD_MASK, and | 908 | * will take ->siglock, notice SIGNAL_CLD_MASK, and |
909 | * notify its parent. See get_signal_to_deliver(). | 909 | * notify its parent. See get_signal_to_deliver(). |
910 | */ | 910 | */ |
911 | signal->flags = why | SIGNAL_STOP_CONTINUED; | 911 | signal->flags = why | SIGNAL_STOP_CONTINUED; |
912 | signal->group_stop_count = 0; | 912 | signal->group_stop_count = 0; |
913 | signal->group_exit_code = 0; | 913 | signal->group_exit_code = 0; |
914 | } | 914 | } |
915 | } | 915 | } |
916 | 916 | ||
917 | return !sig_ignored(p, sig, from_ancestor_ns); | 917 | return !sig_ignored(p, sig, from_ancestor_ns); |
918 | } | 918 | } |
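
The stop/continue bookkeeping above is observable from userspace through waitpid(): WUNTRACED reports the stop, and after SIGCONT wakes the threads, WCONTINUED reports the continuation:

#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {			/* child: just wait for signals */
		for (;;)
			pause();
	}

	kill(pid, SIGSTOP);
	waitpid(pid, &status, WUNTRACED);
	if (WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));

	kill(pid, SIGCONT);		/* wakes the threads, clears pending stops */
	waitpid(pid, &status, WCONTINUED);
	if (WIFCONTINUED(status))
		printf("child continued\n");

	kill(pid, SIGKILL);
	waitpid(pid, &status, 0);
	return 0;
}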
919 | 919 | ||
920 | /* | 920 | /* |
921 | * Test if P wants to take SIG. After we've checked all threads with this, | 921 | * Test if P wants to take SIG. After we've checked all threads with this, |
922 | * it's equivalent to finding no threads not blocking SIG. Any threads not | 922 | * it's equivalent to finding no threads not blocking SIG. Any threads not |
923 | * blocking SIG were ruled out because they are not running and already | 923 | * blocking SIG were ruled out because they are not running and already |
924 | * have pending signals. Such threads will dequeue from the shared queue | 924 | * have pending signals. Such threads will dequeue from the shared queue |
925 | * as soon as they're available, so putting the signal on the shared queue | 925 | * as soon as they're available, so putting the signal on the shared queue |
926 | * will be equivalent to sending it to one such thread. | 926 | * will be equivalent to sending it to one such thread. |
927 | */ | 927 | */ |
928 | static inline int wants_signal(int sig, struct task_struct *p) | 928 | static inline int wants_signal(int sig, struct task_struct *p) |
929 | { | 929 | { |
930 | if (sigismember(&p->blocked, sig)) | 930 | if (sigismember(&p->blocked, sig)) |
931 | return 0; | 931 | return 0; |
932 | if (p->flags & PF_EXITING) | 932 | if (p->flags & PF_EXITING) |
933 | return 0; | 933 | return 0; |
934 | if (sig == SIGKILL) | 934 | if (sig == SIGKILL) |
935 | return 1; | 935 | return 1; |
936 | if (task_is_stopped_or_traced(p)) | 936 | if (task_is_stopped_or_traced(p)) |
937 | return 0; | 937 | return 0; |
938 | return task_curr(p) || !signal_pending(p); | 938 | return task_curr(p) || !signal_pending(p); |
939 | } | 939 | } |
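
The selection rule wants_signal() implements is easy to demonstrate with threads: a process-directed signal is handled by some thread that has it unblocked. Build with -pthread:

#include <pthread.h>
#include <signal.h>
#include <unistd.h>

static void handler(int sig)
{
	(void)sig;
	/* only async-signal-safe calls in a handler */
	write(1, "taken by the unblocked thread\n", 30);
}

static void *worker(void *arg)
{
	sigset_t none;

	(void)arg;
	sigemptyset(&none);		/* this thread leaves SIGUSR1 unblocked */
	pthread_sigmask(SIG_SETMASK, &none, NULL);
	for (;;)
		pause();
	return NULL;
}

int main(void)
{
	pthread_t tid;
	sigset_t set;

	signal(SIGUSR1, handler);

	/* block SIGUSR1 here; the worker inherits the mask, then clears it */
	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	pthread_sigmask(SIG_BLOCK, &set, NULL);

	pthread_create(&tid, NULL, worker, NULL);
	sleep(1);
	kill(getpid(), SIGUSR1);	/* process-directed: routed to the worker */
	sleep(1);
	return 0;
}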
940 | 940 | ||
941 | static void complete_signal(int sig, struct task_struct *p, int group) | 941 | static void complete_signal(int sig, struct task_struct *p, int group) |
942 | { | 942 | { |
943 | struct signal_struct *signal = p->signal; | 943 | struct signal_struct *signal = p->signal; |
944 | struct task_struct *t; | 944 | struct task_struct *t; |
945 | 945 | ||
946 | /* | 946 | /* |
947 | * Now find a thread we can wake up to take the signal off the queue. | 947 | * Now find a thread we can wake up to take the signal off the queue. |
948 | * | 948 | * |
949 | * If the main thread wants the signal, it gets first crack. | 949 | * If the main thread wants the signal, it gets first crack. |
950 | * Probably the least surprising to the average bear. | 950 | * Probably the least surprising to the average bear. |
951 | */ | 951 | */ |
952 | if (wants_signal(sig, p)) | 952 | if (wants_signal(sig, p)) |
953 | t = p; | 953 | t = p; |
954 | else if (!group || thread_group_empty(p)) | 954 | else if (!group || thread_group_empty(p)) |
955 | /* | 955 | /* |
956 | * There is just one thread and it does not need to be woken. | 956 | * There is just one thread and it does not need to be woken. |
957 | * It will dequeue unblocked signals before it runs again. | 957 | * It will dequeue unblocked signals before it runs again. |
958 | */ | 958 | */ |
959 | return; | 959 | return; |
960 | else { | 960 | else { |
961 | /* | 961 | /* |
962 | * Otherwise try to find a suitable thread. | 962 | * Otherwise try to find a suitable thread. |
963 | */ | 963 | */ |
964 | t = signal->curr_target; | 964 | t = signal->curr_target; |
965 | while (!wants_signal(sig, t)) { | 965 | while (!wants_signal(sig, t)) { |
966 | t = next_thread(t); | 966 | t = next_thread(t); |
967 | if (t == signal->curr_target) | 967 | if (t == signal->curr_target) |
968 | /* | 968 | /* |
969 | * No thread needs to be woken. | 969 | * No thread needs to be woken. |
970 | * Any eligible threads will see | 970 | * Any eligible threads will see |
971 | * the signal in the queue soon. | 971 | * the signal in the queue soon. |
972 | */ | 972 | */ |
973 | return; | 973 | return; |
974 | } | 974 | } |
975 | signal->curr_target = t; | 975 | signal->curr_target = t; |
976 | } | 976 | } |
977 | 977 | ||
978 | /* | 978 | /* |
979 | * Found a killable thread. If the signal will be fatal, | 979 | * Found a killable thread. If the signal will be fatal, |
980 | * then start taking the whole group down immediately. | 980 | * then start taking the whole group down immediately. |
981 | */ | 981 | */ |
982 | if (sig_fatal(p, sig) && | 982 | if (sig_fatal(p, sig) && |
983 | !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && | 983 | !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && |
984 | !sigismember(&t->real_blocked, sig) && | 984 | !sigismember(&t->real_blocked, sig) && |
985 | (sig == SIGKILL || !t->ptrace)) { | 985 | (sig == SIGKILL || !t->ptrace)) { |
986 | /* | 986 | /* |
987 | * This signal will be fatal to the whole group. | 987 | * This signal will be fatal to the whole group. |
988 | */ | 988 | */ |
989 | if (!sig_kernel_coredump(sig)) { | 989 | if (!sig_kernel_coredump(sig)) { |
990 | /* | 990 | /* |
991 | * Start a group exit and wake everybody up. | 991 | * Start a group exit and wake everybody up. |
992 | * This way we don't have other threads | 992 | * This way we don't have other threads |
993 | * running and doing things after a slower | 993 | * running and doing things after a slower |
994 | * thread has the fatal signal pending. | 994 | * thread has the fatal signal pending. |
995 | */ | 995 | */ |
996 | signal->flags = SIGNAL_GROUP_EXIT; | 996 | signal->flags = SIGNAL_GROUP_EXIT; |
997 | signal->group_exit_code = sig; | 997 | signal->group_exit_code = sig; |
998 | signal->group_stop_count = 0; | 998 | signal->group_stop_count = 0; |
999 | t = p; | 999 | t = p; |
1000 | do { | 1000 | do { |
1001 | task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); | 1001 | task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); |
1002 | sigaddset(&t->pending.signal, SIGKILL); | 1002 | sigaddset(&t->pending.signal, SIGKILL); |
1003 | signal_wake_up(t, 1); | 1003 | signal_wake_up(t, 1); |
1004 | } while_each_thread(p, t); | 1004 | } while_each_thread(p, t); |
1005 | return; | 1005 | return; |
1006 | } | 1006 | } |
1007 | } | 1007 | } |
1008 | 1008 | ||
1009 | /* | 1009 | /* |
1010 | * The signal is already in the shared-pending queue. | 1010 | * The signal is already in the shared-pending queue. |
1011 | * Tell the chosen thread to wake up and dequeue it. | 1011 | * Tell the chosen thread to wake up and dequeue it. |
1012 | */ | 1012 | */ |
1013 | signal_wake_up(t, sig == SIGKILL); | 1013 | signal_wake_up(t, sig == SIGKILL); |
1014 | return; | 1014 | return; |
1015 | } | 1015 | } |
1016 | 1016 | ||
1017 | static inline int legacy_queue(struct sigpending *signals, int sig) | 1017 | static inline int legacy_queue(struct sigpending *signals, int sig) |
1018 | { | 1018 | { |
1019 | return (sig < SIGRTMIN) && sigismember(&signals->signal, sig); | 1019 | return (sig < SIGRTMIN) && sigismember(&signals->signal, sig); |
1020 | } | 1020 | } |
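
legacy_queue() is why classic signals collapse while blocked but realtime signals accumulate; as the comment further down notes, Linux chooses to queue rt signals even when sent with kill(). A runnable demonstration (typical output: 1 and 2):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t usr1, rt;

static void h_usr1(int s) { (void)s; usr1++; }
static void h_rt(int s)   { (void)s; rt++; }

int main(void)
{
	sigset_t set;

	signal(SIGUSR1, h_usr1);
	signal(SIGRTMIN, h_rt);

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	kill(getpid(), SIGUSR1);	/* pending */
	kill(getpid(), SIGUSR1);	/* legacy_queue(): silently dropped */
	kill(getpid(), SIGRTMIN);	/* queued */
	kill(getpid(), SIGRTMIN);	/* queued again */

	sigprocmask(SIG_UNBLOCK, &set, NULL);
	printf("SIGUSR1 handled %d time(s), SIGRTMIN %d time(s)\n",
	       (int)usr1, (int)rt);
	return 0;
}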
1021 | 1021 | ||
1022 | static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, | 1022 | static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, |
1023 | int group, int from_ancestor_ns) | 1023 | int group, int from_ancestor_ns) |
1024 | { | 1024 | { |
1025 | struct sigpending *pending; | 1025 | struct sigpending *pending; |
1026 | struct sigqueue *q; | 1026 | struct sigqueue *q; |
1027 | int override_rlimit; | 1027 | int override_rlimit; |
1028 | 1028 | ||
1029 | trace_signal_generate(sig, info, t); | 1029 | trace_signal_generate(sig, info, t); |
1030 | 1030 | ||
1031 | assert_spin_locked(&t->sighand->siglock); | 1031 | assert_spin_locked(&t->sighand->siglock); |
1032 | 1032 | ||
1033 | if (!prepare_signal(sig, t, from_ancestor_ns)) | 1033 | if (!prepare_signal(sig, t, from_ancestor_ns)) |
1034 | return 0; | 1034 | return 0; |
1035 | 1035 | ||
1036 | pending = group ? &t->signal->shared_pending : &t->pending; | 1036 | pending = group ? &t->signal->shared_pending : &t->pending; |
1037 | /* | 1037 | /* |
1038 | * Short-circuit ignored signals and support queuing | 1038 | * Short-circuit ignored signals and support queuing |
1039 | * exactly one non-rt signal, so that we can get more | 1039 | * exactly one non-rt signal, so that we can get more |
1040 | * detailed information about the cause of the signal. | 1040 | * detailed information about the cause of the signal. |
1041 | */ | 1041 | */ |
1042 | if (legacy_queue(pending, sig)) | 1042 | if (legacy_queue(pending, sig)) |
1043 | return 0; | 1043 | return 0; |
1044 | /* | 1044 | /* |
1045 | * fast-pathed signals for kernel-internal things like SIGSTOP | 1045 | * fast-pathed signals for kernel-internal things like SIGSTOP |
1046 | * or SIGKILL. | 1046 | * or SIGKILL. |
1047 | */ | 1047 | */ |
1048 | if (info == SEND_SIG_FORCED) | 1048 | if (info == SEND_SIG_FORCED) |
1049 | goto out_set; | 1049 | goto out_set; |
1050 | 1050 | ||
1051 | /* | 1051 | /* |
1052 | * Real-time signals must be queued if sent by sigqueue, or | 1052 | * Real-time signals must be queued if sent by sigqueue, or |
1053 | * some other real-time mechanism. It is implementation | 1053 | * some other real-time mechanism. It is implementation |
1054 | * defined whether kill() does so. We attempt to do so, on | 1054 | * defined whether kill() does so. We attempt to do so, on |
1055 | * the principle of least surprise, but since kill is not | 1055 | * the principle of least surprise, but since kill is not |
1056 | * allowed to fail with EAGAIN when low on memory we just | 1056 | * allowed to fail with EAGAIN when low on memory we just |
1057 | * make sure at least one signal gets delivered and don't | 1057 | * make sure at least one signal gets delivered and don't |
1058 | * pass on the info struct. | 1058 | * pass on the info struct. |
1059 | */ | 1059 | */ |
1060 | if (sig < SIGRTMIN) | 1060 | if (sig < SIGRTMIN) |
1061 | override_rlimit = (is_si_special(info) || info->si_code >= 0); | 1061 | override_rlimit = (is_si_special(info) || info->si_code >= 0); |
1062 | else | 1062 | else |
1063 | override_rlimit = 0; | 1063 | override_rlimit = 0; |
1064 | 1064 | ||
1065 | q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE, | 1065 | q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE, |
1066 | override_rlimit); | 1066 | override_rlimit); |
1067 | if (q) { | 1067 | if (q) { |
1068 | list_add_tail(&q->list, &pending->list); | 1068 | list_add_tail(&q->list, &pending->list); |
1069 | switch ((unsigned long) info) { | 1069 | switch ((unsigned long) info) { |
1070 | case (unsigned long) SEND_SIG_NOINFO: | 1070 | case (unsigned long) SEND_SIG_NOINFO: |
1071 | q->info.si_signo = sig; | 1071 | q->info.si_signo = sig; |
1072 | q->info.si_errno = 0; | 1072 | q->info.si_errno = 0; |
1073 | q->info.si_code = SI_USER; | 1073 | q->info.si_code = SI_USER; |
1074 | q->info.si_pid = task_tgid_nr_ns(current, | 1074 | q->info.si_pid = task_tgid_nr_ns(current, |
1075 | task_active_pid_ns(t)); | 1075 | task_active_pid_ns(t)); |
1076 | q->info.si_uid = current_uid(); | 1076 | q->info.si_uid = current_uid(); |
1077 | break; | 1077 | break; |
1078 | case (unsigned long) SEND_SIG_PRIV: | 1078 | case (unsigned long) SEND_SIG_PRIV: |
1079 | q->info.si_signo = sig; | 1079 | q->info.si_signo = sig; |
1080 | q->info.si_errno = 0; | 1080 | q->info.si_errno = 0; |
1081 | q->info.si_code = SI_KERNEL; | 1081 | q->info.si_code = SI_KERNEL; |
1082 | q->info.si_pid = 0; | 1082 | q->info.si_pid = 0; |
1083 | q->info.si_uid = 0; | 1083 | q->info.si_uid = 0; |
1084 | break; | 1084 | break; |
1085 | default: | 1085 | default: |
1086 | copy_siginfo(&q->info, info); | 1086 | copy_siginfo(&q->info, info); |
1087 | if (from_ancestor_ns) | 1087 | if (from_ancestor_ns) |
1088 | q->info.si_pid = 0; | 1088 | q->info.si_pid = 0; |
1089 | break; | 1089 | break; |
1090 | } | 1090 | } |
1091 | } else if (!is_si_special(info)) { | 1091 | } else if (!is_si_special(info)) { |
1092 | if (sig >= SIGRTMIN && info->si_code != SI_USER) { | 1092 | if (sig >= SIGRTMIN && info->si_code != SI_USER) { |
1093 | /* | 1093 | /* |
1094 | * Queue overflow, abort. We may abort if the | 1094 | * Queue overflow, abort. We may abort if the |
1095 | * signal was rt and sent by user using something | 1095 | * signal was rt and sent by user using something |
1096 | * other than kill(). | 1096 | * other than kill(). |
1097 | */ | 1097 | */ |
1098 | trace_signal_overflow_fail(sig, group, info); | 1098 | trace_signal_overflow_fail(sig, group, info); |
1099 | return -EAGAIN; | 1099 | return -EAGAIN; |
1100 | } else { | 1100 | } else { |
1101 | /* | 1101 | /* |
1102 | * This is a silent loss of information. We still | 1102 | * This is a silent loss of information. We still |
1103 | * send the signal, but the *info bits are lost. | 1103 | * send the signal, but the *info bits are lost. |
1104 | */ | 1104 | */ |
1105 | trace_signal_lose_info(sig, group, info); | 1105 | trace_signal_lose_info(sig, group, info); |
1106 | } | 1106 | } |
1107 | } | 1107 | } |
1108 | 1108 | ||
1109 | out_set: | 1109 | out_set: |
1110 | signalfd_notify(t, sig); | 1110 | signalfd_notify(t, sig); |
1111 | sigaddset(&pending->signal, sig); | 1111 | sigaddset(&pending->signal, sig); |
1112 | complete_signal(sig, t, group); | 1112 | complete_signal(sig, t, group); |
1113 | return 0; | 1113 | return 0; |
1114 | } | 1114 | } |
1115 | 1115 | ||
1116 | static int send_signal(int sig, struct siginfo *info, struct task_struct *t, | 1116 | static int send_signal(int sig, struct siginfo *info, struct task_struct *t, |
1117 | int group) | 1117 | int group) |
1118 | { | 1118 | { |
1119 | int from_ancestor_ns = 0; | 1119 | int from_ancestor_ns = 0; |
1120 | 1120 | ||
1121 | #ifdef CONFIG_PID_NS | 1121 | #ifdef CONFIG_PID_NS |
1122 | from_ancestor_ns = si_fromuser(info) && | 1122 | from_ancestor_ns = si_fromuser(info) && |
1123 | !task_pid_nr_ns(current, task_active_pid_ns(t)); | 1123 | !task_pid_nr_ns(current, task_active_pid_ns(t)); |
1124 | #endif | 1124 | #endif |
1125 | 1125 | ||
1126 | return __send_signal(sig, info, t, group, from_ancestor_ns); | 1126 | return __send_signal(sig, info, t, group, from_ancestor_ns); |
1127 | } | 1127 | } |
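
The from_ancestor_ns test above is what keeps si_pid meaningful across pid namespaces: a sender that has no pid in the target's namespace (task_pid_nr_ns() returns 0) has si_pid cleared for it in __send_signal(). A hedged user-space sketch of the observable effect — it needs root for CLONE_NEWPID, the stack-top arithmetic assumes a downward-growing stack, and the printf in the handler is not async-signal-safe but is fine for a demo:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/wait.h>

	static void handler(int sig, siginfo_t *si, void *uc)
	{
		(void)uc;
		/* the sender lives in an ancestor pid namespace, so the
		 * kernel reported si_pid as 0 (see __send_signal() above) */
		printf("got signal %d, si_pid=%d\n", sig, (int)si->si_pid);
	}

	static int child(void *arg)
	{
		struct sigaction sa;

		(void)arg;
		sa.sa_sigaction = handler;
		sa.sa_flags = SA_SIGINFO;
		sigemptyset(&sa.sa_mask);
		sigaction(SIGUSR1, &sa, NULL);
		pause();
		return 0;
	}

	int main(void)
	{
		static char stack[64 * 1024];
		pid_t pid = clone(child, stack + sizeof(stack),
				  CLONE_NEWPID | SIGCHLD, NULL);

		if (pid < 0) {
			perror("clone (CLONE_NEWPID needs root)");
			return 1;
		}
		sleep(1);		/* crude: let the child install its handler */
		kill(pid, SIGUSR1);	/* we are the ancestor-namespace sender */
		waitpid(pid, NULL, 0);
		return 0;
	}
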
1128 | 1128 | ||
1129 | static void print_fatal_signal(struct pt_regs *regs, int signr) | 1129 | static void print_fatal_signal(struct pt_regs *regs, int signr) |
1130 | { | 1130 | { |
1131 | printk("%s/%d: potentially unexpected fatal signal %d.\n", | 1131 | printk("%s/%d: potentially unexpected fatal signal %d.\n", |
1132 | current->comm, task_pid_nr(current), signr); | 1132 | current->comm, task_pid_nr(current), signr); |
1133 | 1133 | ||
1134 | #if defined(__i386__) && !defined(__arch_um__) | 1134 | #if defined(__i386__) && !defined(__arch_um__) |
1135 | printk("code at %08lx: ", regs->ip); | 1135 | printk("code at %08lx: ", regs->ip); |
1136 | { | 1136 | { |
1137 | int i; | 1137 | int i; |
1138 | for (i = 0; i < 16; i++) { | 1138 | for (i = 0; i < 16; i++) { |
1139 | unsigned char insn; | 1139 | unsigned char insn; |
1140 | 1140 | ||
1141 | if (get_user(insn, (unsigned char *)(regs->ip + i))) | 1141 | if (get_user(insn, (unsigned char *)(regs->ip + i))) |
1142 | break; | 1142 | break; |
1143 | printk("%02x ", insn); | 1143 | printk("%02x ", insn); |
1144 | } | 1144 | } |
1145 | } | 1145 | } |
1146 | #endif | 1146 | #endif |
1147 | printk("\n"); | 1147 | printk("\n"); |
1148 | preempt_disable(); | 1148 | preempt_disable(); |
1149 | show_regs(regs); | 1149 | show_regs(regs); |
1150 | preempt_enable(); | 1150 | preempt_enable(); |
1151 | } | 1151 | } |
1152 | 1152 | ||
1153 | static int __init setup_print_fatal_signals(char *str) | 1153 | static int __init setup_print_fatal_signals(char *str) |
1154 | { | 1154 | { |
1155 | get_option (&str, &print_fatal_signals); | 1155 | get_option (&str, &print_fatal_signals); |
1156 | 1156 | ||
1157 | return 1; | 1157 | return 1; |
1158 | } | 1158 | } |
1159 | 1159 | ||
1160 | __setup("print-fatal-signals=", setup_print_fatal_signals); | 1160 | __setup("print-fatal-signals=", setup_print_fatal_signals); |
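
Besides the `print-fatal-signals=` boot parameter registered above, mainline also exposes the same knob as a sysctl. A minimal sketch of flipping it at runtime, assuming the procfs path /proc/sys/kernel/print-fatal-signals is present as in mainline:

	#include <stdio.h>

	int main(void)
	{
		/* runtime equivalent of booting with print-fatal-signals=1 */
		FILE *f = fopen("/proc/sys/kernel/print-fatal-signals", "w");

		if (!f)
			return 1;	/* older kernel or no permission */
		fputs("1\n", f);
		return fclose(f);
	}
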
1161 | 1161 | ||
1162 | int | 1162 | int |
1163 | __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) | 1163 | __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
1164 | { | 1164 | { |
1165 | return send_signal(sig, info, p, 1); | 1165 | return send_signal(sig, info, p, 1); |
1166 | } | 1166 | } |
1167 | 1167 | ||
1168 | static int | 1168 | static int |
1169 | specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) | 1169 | specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) |
1170 | { | 1170 | { |
1171 | return send_signal(sig, info, t, 0); | 1171 | return send_signal(sig, info, t, 0); |
1172 | } | 1172 | } |
1173 | 1173 | ||
1174 | int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, | 1174 | int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, |
1175 | bool group) | 1175 | bool group) |
1176 | { | 1176 | { |
1177 | unsigned long flags; | 1177 | unsigned long flags; |
1178 | int ret = -ESRCH; | 1178 | int ret = -ESRCH; |
1179 | 1179 | ||
1180 | if (lock_task_sighand(p, &flags)) { | 1180 | if (lock_task_sighand(p, &flags)) { |
1181 | ret = send_signal(sig, info, p, group); | 1181 | ret = send_signal(sig, info, p, group); |
1182 | unlock_task_sighand(p, &flags); | 1182 | unlock_task_sighand(p, &flags); |
1183 | } | 1183 | } |
1184 | 1184 | ||
1185 | return ret; | 1185 | return ret; |
1186 | } | 1186 | } |
1187 | 1187 | ||
1188 | /* | 1188 | /* |
1189 | * Force a signal that the process can't ignore: if necessary | 1189 | * Force a signal that the process can't ignore: if necessary |
1190 | * we unblock the signal and change any SIG_IGN to SIG_DFL. | 1190 | * we unblock the signal and change any SIG_IGN to SIG_DFL. |
1191 | * | 1191 | * |
1192 | * Note: If we unblock the signal, we always reset it to SIG_DFL, | 1192 | * Note: If we unblock the signal, we always reset it to SIG_DFL, |
1193 | * since we do not want to have a signal handler that was blocked | 1193 | * since we do not want to have a signal handler that was blocked |
1194 | * be invoked when user space had explicitly blocked it. | 1194 | * be invoked when user space had explicitly blocked it. |
1195 | * | 1195 | * |
1196 | * We don't want to have recursive SIGSEGV's etc, for example, | 1196 | * We don't want to have recursive SIGSEGV's etc, for example, |
1197 | * that is why we also clear SIGNAL_UNKILLABLE. | 1197 | * that is why we also clear SIGNAL_UNKILLABLE. |
1198 | */ | 1198 | */ |
1199 | int | 1199 | int |
1200 | force_sig_info(int sig, struct siginfo *info, struct task_struct *t) | 1200 | force_sig_info(int sig, struct siginfo *info, struct task_struct *t) |
1201 | { | 1201 | { |
1202 | unsigned long int flags; | 1202 | unsigned long int flags; |
1203 | int ret, blocked, ignored; | 1203 | int ret, blocked, ignored; |
1204 | struct k_sigaction *action; | 1204 | struct k_sigaction *action; |
1205 | 1205 | ||
1206 | spin_lock_irqsave(&t->sighand->siglock, flags); | 1206 | spin_lock_irqsave(&t->sighand->siglock, flags); |
1207 | action = &t->sighand->action[sig-1]; | 1207 | action = &t->sighand->action[sig-1]; |
1208 | ignored = action->sa.sa_handler == SIG_IGN; | 1208 | ignored = action->sa.sa_handler == SIG_IGN; |
1209 | blocked = sigismember(&t->blocked, sig); | 1209 | blocked = sigismember(&t->blocked, sig); |
1210 | if (blocked || ignored) { | 1210 | if (blocked || ignored) { |
1211 | action->sa.sa_handler = SIG_DFL; | 1211 | action->sa.sa_handler = SIG_DFL; |
1212 | if (blocked) { | 1212 | if (blocked) { |
1213 | sigdelset(&t->blocked, sig); | 1213 | sigdelset(&t->blocked, sig); |
1214 | recalc_sigpending_and_wake(t); | 1214 | recalc_sigpending_and_wake(t); |
1215 | } | 1215 | } |
1216 | } | 1216 | } |
1217 | if (action->sa.sa_handler == SIG_DFL) | 1217 | if (action->sa.sa_handler == SIG_DFL) |
1218 | t->signal->flags &= ~SIGNAL_UNKILLABLE; | 1218 | t->signal->flags &= ~SIGNAL_UNKILLABLE; |
1219 | ret = specific_send_sig_info(sig, info, t); | 1219 | ret = specific_send_sig_info(sig, info, t); |
1220 | spin_unlock_irqrestore(&t->sighand->siglock, flags); | 1220 | spin_unlock_irqrestore(&t->sighand->siglock, flags); |
1221 | 1221 | ||
1222 | return ret; | 1222 | return ret; |
1223 | } | 1223 | } |
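
The effect of the forcing logic above is visible from user space: a synchronous fault delivers its signal even when the process has both ignored and blocked it, because the handler is reset to SIG_DFL and the signal is unblocked. A small demo (it deliberately crashes):

	#include <signal.h>
	#include <stdio.h>

	int main(void)
	{
		sigset_t set;

		signal(SIGSEGV, SIG_IGN);		/* try to ignore it */
		sigemptyset(&set);
		sigaddset(&set, SIGSEGV);
		sigprocmask(SIG_BLOCK, &set, NULL);	/* ... and block it */

		printf("about to fault\n");
		fflush(stdout);
		*(volatile int *)0 = 0;			/* fault -> force_sig_info() */

		printf("never reached\n");		/* SIG_IGN was reset to SIG_DFL */
		return 0;
	}
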
1224 | 1224 | ||
1225 | /* | 1225 | /* |
1226 | * Nuke all other threads in the group. | 1226 | * Nuke all other threads in the group. |
1227 | */ | 1227 | */ |
1228 | int zap_other_threads(struct task_struct *p) | 1228 | int zap_other_threads(struct task_struct *p) |
1229 | { | 1229 | { |
1230 | struct task_struct *t = p; | 1230 | struct task_struct *t = p; |
1231 | int count = 0; | 1231 | int count = 0; |
1232 | 1232 | ||
1233 | p->signal->group_stop_count = 0; | 1233 | p->signal->group_stop_count = 0; |
1234 | 1234 | ||
1235 | while_each_thread(p, t) { | 1235 | while_each_thread(p, t) { |
1236 | task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); | 1236 | task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); |
1237 | count++; | 1237 | count++; |
1238 | 1238 | ||
1239 | /* Don't bother with already dead threads */ | 1239 | /* Don't bother with already dead threads */ |
1240 | if (t->exit_state) | 1240 | if (t->exit_state) |
1241 | continue; | 1241 | continue; |
1242 | sigaddset(&t->pending.signal, SIGKILL); | 1242 | sigaddset(&t->pending.signal, SIGKILL); |
1243 | signal_wake_up(t, 1); | 1243 | signal_wake_up(t, 1); |
1244 | } | 1244 | } |
1245 | 1245 | ||
1246 | return count; | 1246 | return count; |
1247 | } | 1247 | } |
1248 | 1248 | ||
1249 | struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, | 1249 | struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, |
1250 | unsigned long *flags) | 1250 | unsigned long *flags) |
1251 | { | 1251 | { |
1252 | struct sighand_struct *sighand; | 1252 | struct sighand_struct *sighand; |
1253 | 1253 | ||
1254 | rcu_read_lock(); | 1254 | rcu_read_lock(); |
1255 | for (;;) { | 1255 | for (;;) { |
1256 | sighand = rcu_dereference(tsk->sighand); | 1256 | sighand = rcu_dereference(tsk->sighand); |
1257 | if (unlikely(sighand == NULL)) | 1257 | if (unlikely(sighand == NULL)) |
1258 | break; | 1258 | break; |
1259 | 1259 | ||
1260 | spin_lock_irqsave(&sighand->siglock, *flags); | 1260 | spin_lock_irqsave(&sighand->siglock, *flags); |
1261 | if (likely(sighand == tsk->sighand)) | 1261 | if (likely(sighand == tsk->sighand)) |
1262 | break; | 1262 | break; |
1263 | spin_unlock_irqrestore(&sighand->siglock, *flags); | 1263 | spin_unlock_irqrestore(&sighand->siglock, *flags); |
1264 | } | 1264 | } |
1265 | rcu_read_unlock(); | 1265 | rcu_read_unlock(); |
1266 | 1266 | ||
1267 | return sighand; | 1267 | return sighand; |
1268 | } | 1268 | } |
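
__lock_task_sighand() is an instance of a general load/lock/revalidate loop: read the pointer, take the lock it protects, then confirm the pointer did not change while we slept on the lock. A hedged user-space sketch of the same shape (the obj/slot names are hypothetical); note that the kernel version additionally relies on RCU to keep the sighand memory valid across the race, which plain pthreads cannot reproduce:

	#include <pthread.h>
	#include <stdatomic.h>

	struct obj {
		pthread_mutex_t lock;
		/* ... */
	};

	/* *slot may concurrently become NULL or point to a new object */
	static struct obj *lock_stable(_Atomic(struct obj *) *slot)
	{
		struct obj *o;

		for (;;) {
			o = atomic_load(slot);
			if (o == NULL)
				return NULL;		/* like a task with no sighand */
			pthread_mutex_lock(&o->lock);
			if (o == atomic_load(slot))
				return o;		/* still current: return it locked */
			pthread_mutex_unlock(&o->lock);	/* changed under us: retry */
		}
	}
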
1269 | 1269 | ||
1270 | /* | 1270 | /* |
1271 | * send signal info to all the members of a group | 1271 | * send signal info to all the members of a group |
1272 | */ | 1272 | */ |
1273 | int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) | 1273 | int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
1274 | { | 1274 | { |
1275 | int ret; | 1275 | int ret; |
1276 | 1276 | ||
1277 | rcu_read_lock(); | 1277 | rcu_read_lock(); |
1278 | ret = check_kill_permission(sig, info, p); | 1278 | ret = check_kill_permission(sig, info, p); |
1279 | rcu_read_unlock(); | 1279 | rcu_read_unlock(); |
1280 | 1280 | ||
1281 | if (!ret && sig) | 1281 | if (!ret && sig) |
1282 | ret = do_send_sig_info(sig, info, p, true); | 1282 | ret = do_send_sig_info(sig, info, p, true); |
1283 | 1283 | ||
1284 | return ret; | 1284 | return ret; |
1285 | } | 1285 | } |
1286 | 1286 | ||
1287 | /* | 1287 | /* |
1288 | * __kill_pgrp_info() sends a signal to a process group: this is what the tty | 1288 | * __kill_pgrp_info() sends a signal to a process group: this is what the tty |
1289 | * control characters do (^C, ^Z etc) | 1289 | * control characters do (^C, ^Z etc) |
1290 | * - the caller must hold at least a readlock on tasklist_lock | 1290 | * - the caller must hold at least a readlock on tasklist_lock |
1291 | */ | 1291 | */ |
1292 | int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) | 1292 | int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) |
1293 | { | 1293 | { |
1294 | struct task_struct *p = NULL; | 1294 | struct task_struct *p = NULL; |
1295 | int retval, success; | 1295 | int retval, success; |
1296 | 1296 | ||
1297 | success = 0; | 1297 | success = 0; |
1298 | retval = -ESRCH; | 1298 | retval = -ESRCH; |
1299 | do_each_pid_task(pgrp, PIDTYPE_PGID, p) { | 1299 | do_each_pid_task(pgrp, PIDTYPE_PGID, p) { |
1300 | int err = group_send_sig_info(sig, info, p); | 1300 | int err = group_send_sig_info(sig, info, p); |
1301 | success |= !err; | 1301 | success |= !err; |
1302 | retval = err; | 1302 | retval = err; |
1303 | } while_each_pid_task(pgrp, PIDTYPE_PGID, p); | 1303 | } while_each_pid_task(pgrp, PIDTYPE_PGID, p); |
1304 | return success ? 0 : retval; | 1304 | return success ? 0 : retval; |
1305 | } | 1305 | } |
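
What the tty layer does through __kill_pgrp_info() is, from user space, just a process-group kill. A minimal sketch of the same operation (error handling omitted):

	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/wait.h>

	int main(void)
	{
		pid_t pid = fork();

		if (pid == 0) {
			setpgid(0, 0);	/* child leads a fresh process group */
			pause();
			_exit(0);
		}
		setpgid(pid, pid);	/* set it in the parent too, to avoid a race */
		sleep(1);
		killpg(pid, SIGTERM);	/* equivalently: kill(-pid, SIGTERM) */
		waitpid(pid, NULL, 0);
		return 0;
	}
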
1306 | 1306 | ||
1307 | int kill_pid_info(int sig, struct siginfo *info, struct pid *pid) | 1307 | int kill_pid_info(int sig, struct siginfo *info, struct pid *pid) |
1308 | { | 1308 | { |
1309 | int error = -ESRCH; | 1309 | int error = -ESRCH; |
1310 | struct task_struct *p; | 1310 | struct task_struct *p; |
1311 | 1311 | ||
1312 | rcu_read_lock(); | 1312 | rcu_read_lock(); |
1313 | retry: | 1313 | retry: |
1314 | p = pid_task(pid, PIDTYPE_PID); | 1314 | p = pid_task(pid, PIDTYPE_PID); |
1315 | if (p) { | 1315 | if (p) { |
1316 | error = group_send_sig_info(sig, info, p); | 1316 | error = group_send_sig_info(sig, info, p); |
1317 | if (unlikely(error == -ESRCH)) | 1317 | if (unlikely(error == -ESRCH)) |
1318 | /* | 1318 | /* |
1319 | * The task was unhashed in between, try again. | 1319 | * The task was unhashed in between, try again. |
1320 | * If it is dead, pid_task() will return NULL, | 1320 | * If it is dead, pid_task() will return NULL, |
1321 | * if we race with de_thread() it will find the | 1321 | * if we race with de_thread() it will find the |
1322 | * new leader. | 1322 | * new leader. |
1323 | */ | 1323 | */ |
1324 | goto retry; | 1324 | goto retry; |
1325 | } | 1325 | } |
1326 | rcu_read_unlock(); | 1326 | rcu_read_unlock(); |
1327 | 1327 | ||
1328 | return error; | 1328 | return error; |
1329 | } | 1329 | } |
1330 | 1330 | ||
1331 | int kill_proc_info(int sig, struct siginfo *info, pid_t pid) | 1331 | int kill_proc_info(int sig, struct siginfo *info, pid_t pid) |
1332 | { | 1332 | { |
1333 | int error; | 1333 | int error; |
1334 | rcu_read_lock(); | 1334 | rcu_read_lock(); |
1335 | error = kill_pid_info(sig, info, find_vpid(pid)); | 1335 | error = kill_pid_info(sig, info, find_vpid(pid)); |
1336 | rcu_read_unlock(); | 1336 | rcu_read_unlock(); |
1337 | return error; | 1337 | return error; |
1338 | } | 1338 | } |
1339 | 1339 | ||
1340 | /* like kill_pid_info(), but doesn't use uid/euid of "current" */ | 1340 | /* like kill_pid_info(), but doesn't use uid/euid of "current" */ |
1341 | int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, | 1341 | int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, |
1342 | uid_t uid, uid_t euid, u32 secid) | 1342 | uid_t uid, uid_t euid, u32 secid) |
1343 | { | 1343 | { |
1344 | int ret = -EINVAL; | 1344 | int ret = -EINVAL; |
1345 | struct task_struct *p; | 1345 | struct task_struct *p; |
1346 | const struct cred *pcred; | 1346 | const struct cred *pcred; |
1347 | unsigned long flags; | 1347 | unsigned long flags; |
1348 | 1348 | ||
1349 | if (!valid_signal(sig)) | 1349 | if (!valid_signal(sig)) |
1350 | return ret; | 1350 | return ret; |
1351 | 1351 | ||
1352 | rcu_read_lock(); | 1352 | rcu_read_lock(); |
1353 | p = pid_task(pid, PIDTYPE_PID); | 1353 | p = pid_task(pid, PIDTYPE_PID); |
1354 | if (!p) { | 1354 | if (!p) { |
1355 | ret = -ESRCH; | 1355 | ret = -ESRCH; |
1356 | goto out_unlock; | 1356 | goto out_unlock; |
1357 | } | 1357 | } |
1358 | pcred = __task_cred(p); | 1358 | pcred = __task_cred(p); |
1359 | if (si_fromuser(info) && | 1359 | if (si_fromuser(info) && |
1360 | euid != pcred->suid && euid != pcred->uid && | 1360 | euid != pcred->suid && euid != pcred->uid && |
1361 | uid != pcred->suid && uid != pcred->uid) { | 1361 | uid != pcred->suid && uid != pcred->uid) { |
1362 | ret = -EPERM; | 1362 | ret = -EPERM; |
1363 | goto out_unlock; | 1363 | goto out_unlock; |
1364 | } | 1364 | } |
1365 | ret = security_task_kill(p, info, sig, secid); | 1365 | ret = security_task_kill(p, info, sig, secid); |
1366 | if (ret) | 1366 | if (ret) |
1367 | goto out_unlock; | 1367 | goto out_unlock; |
1368 | 1368 | ||
1369 | if (sig) { | 1369 | if (sig) { |
1370 | if (lock_task_sighand(p, &flags)) { | 1370 | if (lock_task_sighand(p, &flags)) { |
1371 | ret = __send_signal(sig, info, p, 1, 0); | 1371 | ret = __send_signal(sig, info, p, 1, 0); |
1372 | unlock_task_sighand(p, &flags); | 1372 | unlock_task_sighand(p, &flags); |
1373 | } else | 1373 | } else |
1374 | ret = -ESRCH; | 1374 | ret = -ESRCH; |
1375 | } | 1375 | } |
1376 | out_unlock: | 1376 | out_unlock: |
1377 | rcu_read_unlock(); | 1377 | rcu_read_unlock(); |
1378 | return ret; | 1378 | return ret; |
1379 | } | 1379 | } |
1380 | EXPORT_SYMBOL_GPL(kill_pid_info_as_uid); | 1380 | EXPORT_SYMBOL_GPL(kill_pid_info_as_uid); |
1381 | 1381 | ||
1382 | /* | 1382 | /* |
1383 | * kill_something_info() interprets pid in interesting ways just like kill(2). | 1383 | * kill_something_info() interprets pid in interesting ways just like kill(2). |
1384 | * | 1384 | * |
1385 | * POSIX specifies that kill(-1,sig) is unspecified, but what we have | 1385 | * POSIX specifies that kill(-1,sig) is unspecified, but what we have |
1386 | * is probably wrong. Should make it like BSD or SYSV. | 1386 | * is probably wrong. Should make it like BSD or SYSV. |
1387 | */ | 1387 | */ |
1388 | 1388 | ||
1389 | static int kill_something_info(int sig, struct siginfo *info, pid_t pid) | 1389 | static int kill_something_info(int sig, struct siginfo *info, pid_t pid) |
1390 | { | 1390 | { |
1391 | int ret; | 1391 | int ret; |
1392 | 1392 | ||
1393 | if (pid > 0) { | 1393 | if (pid > 0) { |
1394 | rcu_read_lock(); | 1394 | rcu_read_lock(); |
1395 | ret = kill_pid_info(sig, info, find_vpid(pid)); | 1395 | ret = kill_pid_info(sig, info, find_vpid(pid)); |
1396 | rcu_read_unlock(); | 1396 | rcu_read_unlock(); |
1397 | return ret; | 1397 | return ret; |
1398 | } | 1398 | } |
1399 | 1399 | ||
1400 | read_lock(&tasklist_lock); | 1400 | read_lock(&tasklist_lock); |
1401 | if (pid != -1) { | 1401 | if (pid != -1) { |
1402 | ret = __kill_pgrp_info(sig, info, | 1402 | ret = __kill_pgrp_info(sig, info, |
1403 | pid ? find_vpid(-pid) : task_pgrp(current)); | 1403 | pid ? find_vpid(-pid) : task_pgrp(current)); |
1404 | } else { | 1404 | } else { |
1405 | int retval = 0, count = 0; | 1405 | int retval = 0, count = 0; |
1406 | struct task_struct * p; | 1406 | struct task_struct * p; |
1407 | 1407 | ||
1408 | for_each_process(p) { | 1408 | for_each_process(p) { |
1409 | if (task_pid_vnr(p) > 1 && | 1409 | if (task_pid_vnr(p) > 1 && |
1410 | !same_thread_group(p, current)) { | 1410 | !same_thread_group(p, current)) { |
1411 | int err = group_send_sig_info(sig, info, p); | 1411 | int err = group_send_sig_info(sig, info, p); |
1412 | ++count; | 1412 | ++count; |
1413 | if (err != -EPERM) | 1413 | if (err != -EPERM) |
1414 | retval = err; | 1414 | retval = err; |
1415 | } | 1415 | } |
1416 | } | 1416 | } |
1417 | ret = count ? retval : -ESRCH; | 1417 | ret = count ? retval : -ESRCH; |
1418 | } | 1418 | } |
1419 | read_unlock(&tasklist_lock); | 1419 | read_unlock(&tasklist_lock); |
1420 | 1420 | ||
1421 | return ret; | 1421 | return ret; |
1422 | } | 1422 | } |
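
For reference, the four pid conventions that kill_something_info() implements, as seen through kill(2); the pid == -1 case mirrors the loop above, which skips vpid 1 and the caller's own thread group:

	#include <signal.h>
	#include <sys/types.h>

	void kill_conventions(pid_t pid)
	{
		kill(pid, SIGTERM);	/* pid > 0: exactly one process */
		kill(0, SIGTERM);	/* pid == 0: the caller's own process group */
		kill(-pid, SIGTERM);	/* pid < -1: the process group |pid| */
		/* kill(-1, SIGTERM): every process we are permitted to signal,
		 * except init and ourselves - don't run this one casually */
	}
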
1423 | 1423 | ||
1424 | /* | 1424 | /* |
1425 | * These are for backward compatibility with the rest of the kernel source. | 1425 | * These are for backward compatibility with the rest of the kernel source. |
1426 | */ | 1426 | */ |
1427 | 1427 | ||
1428 | int send_sig_info(int sig, struct siginfo *info, struct task_struct *p) | 1428 | int send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
1429 | { | 1429 | { |
1430 | /* | 1430 | /* |
1431 | * Make sure legacy kernel users don't send in bad values | 1431 | * Make sure legacy kernel users don't send in bad values |
1432 | * (normal paths check this in check_kill_permission). | 1432 | * (normal paths check this in check_kill_permission). |
1433 | */ | 1433 | */ |
1434 | if (!valid_signal(sig)) | 1434 | if (!valid_signal(sig)) |
1435 | return -EINVAL; | 1435 | return -EINVAL; |
1436 | 1436 | ||
1437 | return do_send_sig_info(sig, info, p, false); | 1437 | return do_send_sig_info(sig, info, p, false); |
1438 | } | 1438 | } |
1439 | 1439 | ||
1440 | #define __si_special(priv) \ | 1440 | #define __si_special(priv) \ |
1441 | ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO) | 1441 | ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO) |
1442 | 1442 | ||
1443 | int | 1443 | int |
1444 | send_sig(int sig, struct task_struct *p, int priv) | 1444 | send_sig(int sig, struct task_struct *p, int priv) |
1445 | { | 1445 | { |
1446 | return send_sig_info(sig, __si_special(priv), p); | 1446 | return send_sig_info(sig, __si_special(priv), p); |
1447 | } | 1447 | } |
1448 | 1448 | ||
1449 | void | 1449 | void |
1450 | force_sig(int sig, struct task_struct *p) | 1450 | force_sig(int sig, struct task_struct *p) |
1451 | { | 1451 | { |
1452 | force_sig_info(sig, SEND_SIG_PRIV, p); | 1452 | force_sig_info(sig, SEND_SIG_PRIV, p); |
1453 | } | 1453 | } |
1454 | 1454 | ||
1455 | /* | 1455 | /* |
1456 | * When things go south during signal handling, we | 1456 | * When things go south during signal handling, we |
1457 | * will force a SIGSEGV. And if the signal that caused | 1457 | * will force a SIGSEGV. And if the signal that caused |
1458 | * the problem was already a SIGSEGV, we'll want to | 1458 | * the problem was already a SIGSEGV, we'll want to |
1459 | * make sure we don't even try to deliver the signal. | 1459 | * make sure we don't even try to deliver the signal. |
1460 | */ | 1460 | */ |
1461 | int | 1461 | int |
1462 | force_sigsegv(int sig, struct task_struct *p) | 1462 | force_sigsegv(int sig, struct task_struct *p) |
1463 | { | 1463 | { |
1464 | if (sig == SIGSEGV) { | 1464 | if (sig == SIGSEGV) { |
1465 | unsigned long flags; | 1465 | unsigned long flags; |
1466 | spin_lock_irqsave(&p->sighand->siglock, flags); | 1466 | spin_lock_irqsave(&p->sighand->siglock, flags); |
1467 | p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; | 1467 | p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; |
1468 | spin_unlock_irqrestore(&p->sighand->siglock, flags); | 1468 | spin_unlock_irqrestore(&p->sighand->siglock, flags); |
1469 | } | 1469 | } |
1470 | force_sig(SIGSEGV, p); | 1470 | force_sig(SIGSEGV, p); |
1471 | return 0; | 1471 | return 0; |
1472 | } | 1472 | } |
1473 | 1473 | ||
1474 | int kill_pgrp(struct pid *pid, int sig, int priv) | 1474 | int kill_pgrp(struct pid *pid, int sig, int priv) |
1475 | { | 1475 | { |
1476 | int ret; | 1476 | int ret; |
1477 | 1477 | ||
1478 | read_lock(&tasklist_lock); | 1478 | read_lock(&tasklist_lock); |
1479 | ret = __kill_pgrp_info(sig, __si_special(priv), pid); | 1479 | ret = __kill_pgrp_info(sig, __si_special(priv), pid); |
1480 | read_unlock(&tasklist_lock); | 1480 | read_unlock(&tasklist_lock); |
1481 | 1481 | ||
1482 | return ret; | 1482 | return ret; |
1483 | } | 1483 | } |
1484 | EXPORT_SYMBOL(kill_pgrp); | 1484 | EXPORT_SYMBOL(kill_pgrp); |
1485 | 1485 | ||
1486 | int kill_pid(struct pid *pid, int sig, int priv) | 1486 | int kill_pid(struct pid *pid, int sig, int priv) |
1487 | { | 1487 | { |
1488 | return kill_pid_info(sig, __si_special(priv), pid); | 1488 | return kill_pid_info(sig, __si_special(priv), pid); |
1489 | } | 1489 | } |
1490 | EXPORT_SYMBOL(kill_pid); | 1490 | EXPORT_SYMBOL(kill_pid); |
1491 | 1491 | ||
1492 | /* | 1492 | /* |
1493 | * These functions support sending signals using preallocated sigqueue | 1493 | * These functions support sending signals using preallocated sigqueue |
1494 | * structures. This is needed "because realtime applications cannot | 1494 | * structures. This is needed "because realtime applications cannot |
1495 | * afford to lose notifications of asynchronous events, like timer | 1495 | * afford to lose notifications of asynchronous events, like timer |
1496 | * expirations or I/O completions". In the case of POSIX Timers | 1496 | * expirations or I/O completions". In the case of POSIX Timers |
1497 | * we allocate the sigqueue structure from the timer_create. If this | 1497 | * we allocate the sigqueue structure from the timer_create. If this |
1498 | * allocation fails we are able to report the failure to the application | 1498 | * allocation fails we are able to report the failure to the application |
1499 | * with an EAGAIN error. | 1499 | * with an EAGAIN error. |
1500 | */ | 1500 | */ |
1501 | struct sigqueue *sigqueue_alloc(void) | 1501 | struct sigqueue *sigqueue_alloc(void) |
1502 | { | 1502 | { |
1503 | struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); | 1503 | struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); |
1504 | 1504 | ||
1505 | if (q) | 1505 | if (q) |
1506 | q->flags |= SIGQUEUE_PREALLOC; | 1506 | q->flags |= SIGQUEUE_PREALLOC; |
1507 | 1507 | ||
1508 | return q; | 1508 | return q; |
1509 | } | 1509 | } |
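
From user space, this preallocation happens at timer_create() time, which is why an out-of-memory condition can surface there as EAGAIN instead of a silently lost expiration later. A minimal sketch (may need -lrt on older glibc):

	#include <signal.h>
	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct sigevent sev = { 0 };
		timer_t timerid;

		sev.sigev_notify = SIGEV_SIGNAL;
		sev.sigev_signo = SIGRTMIN;

		if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) == -1) {
			perror("timer_create");	/* EAGAIN: allocation failed */
			return 1;
		}
		timer_delete(timerid);
		return 0;
	}
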
1510 | 1510 | ||
1511 | void sigqueue_free(struct sigqueue *q) | 1511 | void sigqueue_free(struct sigqueue *q) |
1512 | { | 1512 | { |
1513 | unsigned long flags; | 1513 | unsigned long flags; |
1514 | spinlock_t *lock = ¤t->sighand->siglock; | 1514 | spinlock_t *lock = ¤t->sighand->siglock; |
1515 | 1515 | ||
1516 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); | 1516 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); |
1517 | /* | 1517 | /* |
1518 | * We must hold ->siglock while testing q->list | 1518 | * We must hold ->siglock while testing q->list |
1519 | * to serialize with collect_signal() or with | 1519 | * to serialize with collect_signal() or with |
1520 | * __exit_signal()->flush_sigqueue(). | 1520 | * __exit_signal()->flush_sigqueue(). |
1521 | */ | 1521 | */ |
1522 | spin_lock_irqsave(lock, flags); | 1522 | spin_lock_irqsave(lock, flags); |
1523 | q->flags &= ~SIGQUEUE_PREALLOC; | 1523 | q->flags &= ~SIGQUEUE_PREALLOC; |
1524 | /* | 1524 | /* |
1525 | * If it is queued it will be freed when dequeued, | 1525 | * If it is queued it will be freed when dequeued, |
1526 | * like the "regular" sigqueue. | 1526 | * like the "regular" sigqueue. |
1527 | */ | 1527 | */ |
1528 | if (!list_empty(&q->list)) | 1528 | if (!list_empty(&q->list)) |
1529 | q = NULL; | 1529 | q = NULL; |
1530 | spin_unlock_irqrestore(lock, flags); | 1530 | spin_unlock_irqrestore(lock, flags); |
1531 | 1531 | ||
1532 | if (q) | 1532 | if (q) |
1533 | __sigqueue_free(q); | 1533 | __sigqueue_free(q); |
1534 | } | 1534 | } |
1535 | 1535 | ||
1536 | int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) | 1536 | int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) |
1537 | { | 1537 | { |
1538 | int sig = q->info.si_signo; | 1538 | int sig = q->info.si_signo; |
1539 | struct sigpending *pending; | 1539 | struct sigpending *pending; |
1540 | unsigned long flags; | 1540 | unsigned long flags; |
1541 | int ret; | 1541 | int ret; |
1542 | 1542 | ||
1543 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); | 1543 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); |
1544 | 1544 | ||
1545 | ret = -1; | 1545 | ret = -1; |
1546 | if (!likely(lock_task_sighand(t, &flags))) | 1546 | if (!likely(lock_task_sighand(t, &flags))) |
1547 | goto ret; | 1547 | goto ret; |
1548 | 1548 | ||
1549 | ret = 1; /* the signal is ignored */ | 1549 | ret = 1; /* the signal is ignored */ |
1550 | if (!prepare_signal(sig, t, 0)) | 1550 | if (!prepare_signal(sig, t, 0)) |
1551 | goto out; | 1551 | goto out; |
1552 | 1552 | ||
1553 | ret = 0; | 1553 | ret = 0; |
1554 | if (unlikely(!list_empty(&q->list))) { | 1554 | if (unlikely(!list_empty(&q->list))) { |
1555 | /* | 1555 | /* |
1556 | * If an SI_TIMER entry is already queued, just increment | 1556 | * If an SI_TIMER entry is already queued, just increment |
1557 | * the overrun count. | 1557 | * the overrun count. |
1558 | */ | 1558 | */ |
1559 | BUG_ON(q->info.si_code != SI_TIMER); | 1559 | BUG_ON(q->info.si_code != SI_TIMER); |
1560 | q->info.si_overrun++; | 1560 | q->info.si_overrun++; |
1561 | goto out; | 1561 | goto out; |
1562 | } | 1562 | } |
1563 | q->info.si_overrun = 0; | 1563 | q->info.si_overrun = 0; |
1564 | 1564 | ||
1565 | signalfd_notify(t, sig); | 1565 | signalfd_notify(t, sig); |
1566 | pending = group ? &t->signal->shared_pending : &t->pending; | 1566 | pending = group ? &t->signal->shared_pending : &t->pending; |
1567 | list_add_tail(&q->list, &pending->list); | 1567 | list_add_tail(&q->list, &pending->list); |
1568 | sigaddset(&pending->signal, sig); | 1568 | sigaddset(&pending->signal, sig); |
1569 | complete_signal(sig, t, group); | 1569 | complete_signal(sig, t, group); |
1570 | out: | 1570 | out: |
1571 | unlock_task_sighand(t, &flags); | 1571 | unlock_task_sighand(t, &flags); |
1572 | ret: | 1572 | ret: |
1573 | return ret; | 1573 | return ret; |
1574 | } | 1574 | } |
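
The si_overrun accounting in the "already queued" branch above is what timer_getoverrun() reports. A hedged demo: block the signal so the single preallocated entry stays queued while the timer keeps firing, then count the missed expirations (error handling omitted):

	#include <signal.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	int main(void)
	{
		struct sigevent sev = { 0 };
		struct itimerspec its = { 0 };
		sigset_t set;
		siginfo_t si;
		timer_t timerid;

		sev.sigev_notify = SIGEV_SIGNAL;
		sev.sigev_signo = SIGRTMIN;
		timer_create(CLOCK_MONOTONIC, &sev, &timerid);

		sigemptyset(&set);
		sigaddset(&set, SIGRTMIN);
		sigprocmask(SIG_BLOCK, &set, NULL);	/* keep the entry queued */

		its.it_value.tv_nsec = 1000000;		/* first expiry after 1ms */
		its.it_interval.tv_nsec = 1000000;	/* then every 1ms */
		timer_settime(timerid, 0, &its, NULL);

		sleep(1);				/* ~1000 expirations pile up */
		sigwaitinfo(&set, &si);			/* dequeue the one entry */
		printf("overruns: %d\n", timer_getoverrun(timerid));
		timer_delete(timerid);
		return 0;
	}
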
1575 | 1575 | ||
1576 | /* | 1576 | /* |
1577 | * Let a parent know about the death of a child. | 1577 | * Let a parent know about the death of a child. |
1578 | * For a stopped/continued status change, use do_notify_parent_cldstop instead. | 1578 | * For a stopped/continued status change, use do_notify_parent_cldstop instead. |
1579 | * | 1579 | * |
1580 | * Returns -1 if our parent ignored us and so we've switched to | 1580 | * Returns true if our parent ignored us and so we've switched to |
1581 | * self-reaping, or else @sig. | 1581 | * self-reaping. |
1582 | */ | 1582 | */ |
1583 | int do_notify_parent(struct task_struct *tsk, int sig) | 1583 | bool do_notify_parent(struct task_struct *tsk, int sig) |
1584 | { | 1584 | { |
1585 | struct siginfo info; | 1585 | struct siginfo info; |
1586 | unsigned long flags; | 1586 | unsigned long flags; |
1587 | struct sighand_struct *psig; | 1587 | struct sighand_struct *psig; |
1588 | int ret = sig; | 1588 | bool autoreap = false; |
1589 | 1589 | ||
1590 | BUG_ON(sig == -1); | 1590 | BUG_ON(sig == -1); |
1591 | 1591 | ||
1592 | /* do_notify_parent_cldstop should have been called instead. */ | 1592 | /* do_notify_parent_cldstop should have been called instead. */ |
1593 | BUG_ON(task_is_stopped_or_traced(tsk)); | 1593 | BUG_ON(task_is_stopped_or_traced(tsk)); |
1594 | 1594 | ||
1595 | BUG_ON(!tsk->ptrace && | 1595 | BUG_ON(!tsk->ptrace && |
1596 | (tsk->group_leader != tsk || !thread_group_empty(tsk))); | 1596 | (tsk->group_leader != tsk || !thread_group_empty(tsk))); |
1597 | 1597 | ||
1598 | info.si_signo = sig; | 1598 | info.si_signo = sig; |
1599 | info.si_errno = 0; | 1599 | info.si_errno = 0; |
1600 | /* | 1600 | /* |
1601 | * we are under tasklist_lock here so our parent is tied to | 1601 | * we are under tasklist_lock here so our parent is tied to |
1602 | * us and cannot exit and release its namespace. | 1602 | * us and cannot exit and release its namespace. |
1603 | * | 1603 | * |
1604 | * the only thing it can do is switch its nsproxy with sys_unshare, | 1604 | * the only thing it can do is switch its nsproxy with sys_unshare, |
1605 | * but unsharing pid namespaces is not allowed, so we'll always | 1605 | * but unsharing pid namespaces is not allowed, so we'll always |
1606 | * see the relevant namespace | 1606 | * see the relevant namespace |
1607 | * | 1607 | * |
1608 | * write_lock() currently calls preempt_disable() which is the | 1608 | * write_lock() currently calls preempt_disable() which is the |
1609 | * same as rcu_read_lock(), but according to Oleg it is not | 1609 | * same as rcu_read_lock(), but according to Oleg it is not |
1610 | * correct to rely on this | 1610 | * correct to rely on this |
1611 | */ | 1611 | */ |
1612 | rcu_read_lock(); | 1612 | rcu_read_lock(); |
1613 | info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); | 1613 | info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); |
1614 | info.si_uid = __task_cred(tsk)->uid; | 1614 | info.si_uid = __task_cred(tsk)->uid; |
1615 | rcu_read_unlock(); | 1615 | rcu_read_unlock(); |
1616 | 1616 | ||
1617 | info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime, | 1617 | info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime, |
1618 | tsk->signal->utime)); | 1618 | tsk->signal->utime)); |
1619 | info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime, | 1619 | info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime, |
1620 | tsk->signal->stime)); | 1620 | tsk->signal->stime)); |
1621 | 1621 | ||
1622 | info.si_status = tsk->exit_code & 0x7f; | 1622 | info.si_status = tsk->exit_code & 0x7f; |
1623 | if (tsk->exit_code & 0x80) | 1623 | if (tsk->exit_code & 0x80) |
1624 | info.si_code = CLD_DUMPED; | 1624 | info.si_code = CLD_DUMPED; |
1625 | else if (tsk->exit_code & 0x7f) | 1625 | else if (tsk->exit_code & 0x7f) |
1626 | info.si_code = CLD_KILLED; | 1626 | info.si_code = CLD_KILLED; |
1627 | else { | 1627 | else { |
1628 | info.si_code = CLD_EXITED; | 1628 | info.si_code = CLD_EXITED; |
1629 | info.si_status = tsk->exit_code >> 8; | 1629 | info.si_status = tsk->exit_code >> 8; |
1630 | } | 1630 | } |
1631 | 1631 | ||
1632 | psig = tsk->parent->sighand; | 1632 | psig = tsk->parent->sighand; |
1633 | spin_lock_irqsave(&psig->siglock, flags); | 1633 | spin_lock_irqsave(&psig->siglock, flags); |
1634 | if (!tsk->ptrace && sig == SIGCHLD && | 1634 | if (!tsk->ptrace && sig == SIGCHLD && |
1635 | (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || | 1635 | (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || |
1636 | (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { | 1636 | (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { |
1637 | /* | 1637 | /* |
1638 | * We are exiting and our parent doesn't care. POSIX.1 | 1638 | * We are exiting and our parent doesn't care. POSIX.1 |
1639 | * defines special semantics for setting SIGCHLD to SIG_IGN | 1639 | * defines special semantics for setting SIGCHLD to SIG_IGN |
1640 | * or setting the SA_NOCLDWAIT flag: we should be reaped | 1640 | * or setting the SA_NOCLDWAIT flag: we should be reaped |
1641 | * automatically and not left for our parent's wait4 call. | 1641 | * automatically and not left for our parent's wait4 call. |
1642 | * Rather than having the parent do it as a magic kind of | 1642 | * Rather than having the parent do it as a magic kind of |
1643 | * signal handler, we just set this to tell do_exit that we | 1643 | * signal handler, we just set this to tell do_exit that we |
1644 | * can be cleaned up without becoming a zombie. Note that | 1644 | * can be cleaned up without becoming a zombie. Note that |
1645 | * we still call __wake_up_parent in this case, because a | 1645 | * we still call __wake_up_parent in this case, because a |
1646 | * blocked sys_wait4 might now return -ECHILD. | 1646 | * blocked sys_wait4 might now return -ECHILD. |
1647 | * | 1647 | * |
1648 | * Whether we send SIGCHLD or not for SA_NOCLDWAIT | 1648 | * Whether we send SIGCHLD or not for SA_NOCLDWAIT |
1649 | * is implementation-defined: we do (if you don't want | 1649 | * is implementation-defined: we do (if you don't want |
1650 | * it, just use SIG_IGN instead). | 1650 | * it, just use SIG_IGN instead). |
1651 | */ | 1651 | */ |
1652 | ret = tsk->exit_signal = -1; | 1652 | autoreap = true; |
| 1653 | tsk->exit_signal = -1; |
1653 | if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) | 1654 | if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) |
1654 | sig = -1; | 1655 | sig = 0; |
1655 | } | 1656 | } |
1656 | if (valid_signal(sig) && sig > 0) | 1657 | if (valid_signal(sig) && sig) |
1657 | __group_send_sig_info(sig, &info, tsk->parent); | 1658 | __group_send_sig_info(sig, &info, tsk->parent); |
1658 | __wake_up_parent(tsk, tsk->parent); | 1659 | __wake_up_parent(tsk, tsk->parent); |
1659 | spin_unlock_irqrestore(&psig->siglock, flags); | 1660 | spin_unlock_irqrestore(&psig->siglock, flags); |
1660 | 1661 | ||
1661 | return ret; | 1662 | return autoreap; |
1662 | } | 1663 | } |
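
The autoreap branch above is easy to observe from user space: with SIGCHLD set to SIG_IGN, children never become zombies and a blocked wait() eventually fails with ECHILD, exactly as the comment warns. A small demo:

	#include <errno.h>
	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/wait.h>

	int main(void)
	{
		signal(SIGCHLD, SIG_IGN);	/* "our parent doesn't care" */

		if (fork() == 0)
			_exit(0);		/* child: reaped automatically */

		sleep(1);			/* give the child time to exit */
		if (wait(NULL) == -1 && errno == ECHILD)
			printf("wait: ECHILD - no zombie was left behind\n");
		return 0;
	}
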
1663 | 1664 | ||
1664 | /** | 1665 | /** |
1665 | * do_notify_parent_cldstop - notify parent of stopped/continued state change | 1666 | * do_notify_parent_cldstop - notify parent of stopped/continued state change |
1666 | * @tsk: task reporting the state change | 1667 | * @tsk: task reporting the state change |
1667 | * @for_ptracer: the notification is for ptracer | 1668 | * @for_ptracer: the notification is for ptracer |
1668 | * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report | 1669 | * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report |
1669 | * | 1670 | * |
1670 | * Notify @tsk's parent that the stopped/continued state has changed. If | 1671 | * Notify @tsk's parent that the stopped/continued state has changed. If |
1671 | * @for_ptracer is %false, @tsk's group leader notifies its real parent. | 1672 | * @for_ptracer is %false, @tsk's group leader notifies its real parent. |
1672 | * If %true, @tsk reports to @tsk->parent which should be the ptracer. | 1673 | * If %true, @tsk reports to @tsk->parent which should be the ptracer. |
1673 | * | 1674 | * |
1674 | * CONTEXT: | 1675 | * CONTEXT: |
1675 | * Must be called with tasklist_lock at least read locked. | 1676 | * Must be called with tasklist_lock at least read locked. |
1676 | */ | 1677 | */ |
1677 | static void do_notify_parent_cldstop(struct task_struct *tsk, | 1678 | static void do_notify_parent_cldstop(struct task_struct *tsk, |
1678 | bool for_ptracer, int why) | 1679 | bool for_ptracer, int why) |
1679 | { | 1680 | { |
1680 | struct siginfo info; | 1681 | struct siginfo info; |
1681 | unsigned long flags; | 1682 | unsigned long flags; |
1682 | struct task_struct *parent; | 1683 | struct task_struct *parent; |
1683 | struct sighand_struct *sighand; | 1684 | struct sighand_struct *sighand; |
1684 | 1685 | ||
1685 | if (for_ptracer) { | 1686 | if (for_ptracer) { |
1686 | parent = tsk->parent; | 1687 | parent = tsk->parent; |
1687 | } else { | 1688 | } else { |
1688 | tsk = tsk->group_leader; | 1689 | tsk = tsk->group_leader; |
1689 | parent = tsk->real_parent; | 1690 | parent = tsk->real_parent; |
1690 | } | 1691 | } |
1691 | 1692 | ||
1692 | info.si_signo = SIGCHLD; | 1693 | info.si_signo = SIGCHLD; |
1693 | info.si_errno = 0; | 1694 | info.si_errno = 0; |
1694 | /* | 1695 | /* |
1695 | * see comment in do_notify_parent() about the following 4 lines | 1696 | * see comment in do_notify_parent() about the following 4 lines |
1696 | */ | 1697 | */ |
1697 | rcu_read_lock(); | 1698 | rcu_read_lock(); |
1698 | info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns); | 1699 | info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns); |
1699 | info.si_uid = __task_cred(tsk)->uid; | 1700 | info.si_uid = __task_cred(tsk)->uid; |
1700 | rcu_read_unlock(); | 1701 | rcu_read_unlock(); |
1701 | 1702 | ||
1702 | info.si_utime = cputime_to_clock_t(tsk->utime); | 1703 | info.si_utime = cputime_to_clock_t(tsk->utime); |
1703 | info.si_stime = cputime_to_clock_t(tsk->stime); | 1704 | info.si_stime = cputime_to_clock_t(tsk->stime); |
1704 | 1705 | ||
1705 | info.si_code = why; | 1706 | info.si_code = why; |
1706 | switch (why) { | 1707 | switch (why) { |
1707 | case CLD_CONTINUED: | 1708 | case CLD_CONTINUED: |
1708 | info.si_status = SIGCONT; | 1709 | info.si_status = SIGCONT; |
1709 | break; | 1710 | break; |
1710 | case CLD_STOPPED: | 1711 | case CLD_STOPPED: |
1711 | info.si_status = tsk->signal->group_exit_code & 0x7f; | 1712 | info.si_status = tsk->signal->group_exit_code & 0x7f; |
1712 | break; | 1713 | break; |
1713 | case CLD_TRAPPED: | 1714 | case CLD_TRAPPED: |
1714 | info.si_status = tsk->exit_code & 0x7f; | 1715 | info.si_status = tsk->exit_code & 0x7f; |
1715 | break; | 1716 | break; |
1716 | default: | 1717 | default: |
1717 | BUG(); | 1718 | BUG(); |
1718 | } | 1719 | } |
1719 | 1720 | ||
1720 | sighand = parent->sighand; | 1721 | sighand = parent->sighand; |
1721 | spin_lock_irqsave(&sighand->siglock, flags); | 1722 | spin_lock_irqsave(&sighand->siglock, flags); |
1722 | if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && | 1723 | if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && |
1723 | !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) | 1724 | !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) |
1724 | __group_send_sig_info(SIGCHLD, &info, parent); | 1725 | __group_send_sig_info(SIGCHLD, &info, parent); |
1725 | /* | 1726 | /* |
1726 | * Even if SIGCHLD is not generated, we must wake up wait4 calls. | 1727 | * Even if SIGCHLD is not generated, we must wake up wait4 calls. |
1727 | */ | 1728 | */ |
1728 | __wake_up_parent(tsk, parent); | 1729 | __wake_up_parent(tsk, parent); |
1729 | spin_unlock_irqrestore(&sighand->siglock, flags); | 1730 | spin_unlock_irqrestore(&sighand->siglock, flags); |
1730 | } | 1731 | } |
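
The SA_NOCLDSTOP check at the end of the function has a direct user-space counterpart: with that flag set on the SIGCHLD action, a child stopping or continuing no longer raises SIGCHLD, while termination still does. A minimal sketch:

	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/wait.h>

	static volatile sig_atomic_t got_chld;

	static void on_chld(int sig)
	{
		(void)sig;
		got_chld = 1;
	}

	int main(void)
	{
		struct sigaction sa = { 0 };
		pid_t pid;

		sa.sa_handler = on_chld;
		sa.sa_flags = SA_NOCLDSTOP;	/* suppress stop/continue reports */
		sigaction(SIGCHLD, &sa, NULL);

		pid = fork();
		if (pid == 0) {
			pause();
			_exit(0);
		}
		sleep(1);
		kill(pid, SIGSTOP);		/* child stops... */
		sleep(1);			/* ...but no SIGCHLD arrives */
		printf("SIGCHLD after SIGSTOP: %s\n", got_chld ? "yes" : "no");
		kill(pid, SIGKILL);		/* termination still notifies */
		waitpid(pid, NULL, 0);
		return 0;
	}
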
1731 | 1732 | ||
1732 | static inline int may_ptrace_stop(void) | 1733 | static inline int may_ptrace_stop(void) |
1733 | { | 1734 | { |
1734 | if (!likely(current->ptrace)) | 1735 | if (!likely(current->ptrace)) |
1735 | return 0; | 1736 | return 0; |
1736 | /* | 1737 | /* |
1737 | * Are we in the middle of do_coredump? | 1738 | * Are we in the middle of do_coredump? |
1738 | * If so, and our tracer is also part of the coredump, stopping | 1739 | * If so, and our tracer is also part of the coredump, stopping |
1739 | * would be a deadlock, and pointless because our tracer | 1740 | * would be a deadlock, and pointless because our tracer |
1740 | * is dead, so don't allow us to stop. | 1741 | * is dead, so don't allow us to stop. |
1741 | * If SIGKILL was already sent before the caller unlocked | 1742 | * If SIGKILL was already sent before the caller unlocked |
1742 | * ->siglock we must see ->core_state != NULL. Otherwise it | 1743 | * ->siglock we must see ->core_state != NULL. Otherwise it |
1743 | * is safe to enter schedule(). | 1744 | * is safe to enter schedule(). |
1744 | */ | 1745 | */ |
1745 | if (unlikely(current->mm->core_state) && | 1746 | if (unlikely(current->mm->core_state) && |
1746 | unlikely(current->mm == current->parent->mm)) | 1747 | unlikely(current->mm == current->parent->mm)) |
1747 | return 0; | 1748 | return 0; |
1748 | 1749 | ||
1749 | return 1; | 1750 | return 1; |
1750 | } | 1751 | } |
1751 | 1752 | ||
1752 | /* | 1753 | /* |
1753 | * Return non-zero if there is a SIGKILL that should be waking us up. | 1754 | * Return non-zero if there is a SIGKILL that should be waking us up. |
1754 | * Called with the siglock held. | 1755 | * Called with the siglock held. |
1755 | */ | 1756 | */ |
1756 | static int sigkill_pending(struct task_struct *tsk) | 1757 | static int sigkill_pending(struct task_struct *tsk) |
1757 | { | 1758 | { |
1758 | return sigismember(&tsk->pending.signal, SIGKILL) || | 1759 | return sigismember(&tsk->pending.signal, SIGKILL) || |
1759 | sigismember(&tsk->signal->shared_pending.signal, SIGKILL); | 1760 | sigismember(&tsk->signal->shared_pending.signal, SIGKILL); |
1760 | } | 1761 | } |
1761 | 1762 | ||
1762 | /* | 1763 | /* |
1763 | * Test whether the target task of the usual cldstop notification - the | 1764 | * Test whether the target task of the usual cldstop notification - the |
1764 | * real_parent of @child - is in the same group as the ptracer. | 1765 | * real_parent of @child - is in the same group as the ptracer. |
1765 | */ | 1766 | */ |
1766 | static bool real_parent_is_ptracer(struct task_struct *child) | 1767 | static bool real_parent_is_ptracer(struct task_struct *child) |
1767 | { | 1768 | { |
1768 | return same_thread_group(child->parent, child->real_parent); | 1769 | return same_thread_group(child->parent, child->real_parent); |
1769 | } | 1770 | } |
1770 | 1771 | ||
1771 | /* | 1772 | /* |
1772 | * This must be called with current->sighand->siglock held. | 1773 | * This must be called with current->sighand->siglock held. |
1773 | * | 1774 | * |
1774 | * This should be the path for all ptrace stops. | 1775 | * This should be the path for all ptrace stops. |
1775 | * We always set current->last_siginfo while stopped here. | 1776 | * We always set current->last_siginfo while stopped here. |
1776 | * That makes it a way to test a stopped process for | 1777 | * That makes it a way to test a stopped process for |
1777 | * being ptrace-stopped vs being job-control-stopped. | 1778 | * being ptrace-stopped vs being job-control-stopped. |
1778 | * | 1779 | * |
1779 | * If we actually decide not to stop at all because the tracer | 1780 | * If we actually decide not to stop at all because the tracer |
1780 | * is gone, we keep current->exit_code unless clear_code. | 1781 | * is gone, we keep current->exit_code unless clear_code. |
1781 | */ | 1782 | */ |
1782 | static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) | 1783 | static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) |
1783 | __releases(¤t->sighand->siglock) | 1784 | __releases(¤t->sighand->siglock) |
1784 | __acquires(¤t->sighand->siglock) | 1785 | __acquires(¤t->sighand->siglock) |
1785 | { | 1786 | { |
1786 | bool gstop_done = false; | 1787 | bool gstop_done = false; |
1787 | 1788 | ||
1788 | if (arch_ptrace_stop_needed(exit_code, info)) { | 1789 | if (arch_ptrace_stop_needed(exit_code, info)) { |
1789 | /* | 1790 | /* |
1790 | * The arch code has something special to do before a | 1791 | * The arch code has something special to do before a |
1791 | * ptrace stop. This is allowed to block, e.g. for faults | 1792 | * ptrace stop. This is allowed to block, e.g. for faults |
1792 | * on user stack pages. We can't keep the siglock while | 1793 | * on user stack pages. We can't keep the siglock while |
1793 | * calling arch_ptrace_stop, so we must release it now. | 1794 | * calling arch_ptrace_stop, so we must release it now. |
1794 | * To preserve proper semantics, we must do this before | 1795 | * To preserve proper semantics, we must do this before |
1795 | * any signal bookkeeping like checking group_stop_count. | 1796 | * any signal bookkeeping like checking group_stop_count. |
1796 | * Meanwhile, a SIGKILL could come in before we retake the | 1797 | * Meanwhile, a SIGKILL could come in before we retake the |
1797 | * siglock. That must prevent us from sleeping in TASK_TRACED. | 1798 | * siglock. That must prevent us from sleeping in TASK_TRACED. |
1798 | * So after regaining the lock, we must check for SIGKILL. | 1799 | * So after regaining the lock, we must check for SIGKILL. |
1799 | */ | 1800 | */ |
1800 | spin_unlock_irq(¤t->sighand->siglock); | 1801 | spin_unlock_irq(¤t->sighand->siglock); |
1801 | arch_ptrace_stop(exit_code, info); | 1802 | arch_ptrace_stop(exit_code, info); |
1802 | spin_lock_irq(¤t->sighand->siglock); | 1803 | spin_lock_irq(¤t->sighand->siglock); |
1803 | if (sigkill_pending(current)) | 1804 | if (sigkill_pending(current)) |
1804 | return; | 1805 | return; |
1805 | } | 1806 | } |
1806 | 1807 | ||
1807 | /* | 1808 | /* |
1808 | * We're committing to trapping. TRACED should be visible before | 1809 | * We're committing to trapping. TRACED should be visible before |
1809 | * TRAPPING is cleared; otherwise, the tracer might fail do_wait(). | 1810 | * TRAPPING is cleared; otherwise, the tracer might fail do_wait(). |
1810 | * Also, transition to TRACED and updates to ->jobctl should be | 1811 | * Also, transition to TRACED and updates to ->jobctl should be |
1811 | * atomic with respect to siglock and should be done after the arch | 1812 | * atomic with respect to siglock and should be done after the arch |
1812 | * hook as siglock is released and regrabbed across it. | 1813 | * hook as siglock is released and regrabbed across it. |
1813 | */ | 1814 | */ |
1814 | set_current_state(TASK_TRACED); | 1815 | set_current_state(TASK_TRACED); |
1815 | 1816 | ||
1816 | current->last_siginfo = info; | 1817 | current->last_siginfo = info; |
1817 | current->exit_code = exit_code; | 1818 | current->exit_code = exit_code; |
1818 | 1819 | ||
1819 | /* | 1820 | /* |
1820 | * If @why is CLD_STOPPED, we're trapping to participate in a group | 1821 | * If @why is CLD_STOPPED, we're trapping to participate in a group |
1821 | * stop. Do the bookkeeping. Note that if SIGCONT was delivered | 1822 | * stop. Do the bookkeeping. Note that if SIGCONT was delivered |
1822 | * across siglock relocks since INTERRUPT was scheduled, PENDING | 1823 | * across siglock relocks since INTERRUPT was scheduled, PENDING |
1823 | * could be clear now. We act as if SIGCONT is received after | 1824 | * could be clear now. We act as if SIGCONT is received after |
1824 | * TASK_TRACED is entered - ignore it. | 1825 | * TASK_TRACED is entered - ignore it. |
1825 | */ | 1826 | */ |
1826 | if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING)) | 1827 | if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING)) |
1827 | gstop_done = task_participate_group_stop(current); | 1828 | gstop_done = task_participate_group_stop(current); |
1828 | 1829 | ||
1829 | /* any trap clears pending STOP trap, STOP trap clears NOTIFY */ | 1830 | /* any trap clears pending STOP trap, STOP trap clears NOTIFY */ |
1830 | task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP); | 1831 | task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP); |
1831 | if (info && info->si_code >> 8 == PTRACE_EVENT_STOP) | 1832 | if (info && info->si_code >> 8 == PTRACE_EVENT_STOP) |
1832 | task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY); | 1833 | task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY); |
1833 | 1834 | ||
1834 | /* entering a trap, clear TRAPPING */ | 1835 | /* entering a trap, clear TRAPPING */ |
1835 | task_clear_jobctl_trapping(current); | 1836 | task_clear_jobctl_trapping(current); |
1836 | 1837 | ||
1837 | spin_unlock_irq(¤t->sighand->siglock); | 1838 | spin_unlock_irq(¤t->sighand->siglock); |
1838 | read_lock(&tasklist_lock); | 1839 | read_lock(&tasklist_lock); |
1839 | if (may_ptrace_stop()) { | 1840 | if (may_ptrace_stop()) { |
1840 | /* | 1841 | /* |
1841 | * Notify parents of the stop. | 1842 | * Notify parents of the stop. |
1842 | * | 1843 | * |
1843 | * While ptraced, there are two parents - the ptracer and | 1844 | * While ptraced, there are two parents - the ptracer and |
1844 | * the real_parent of the group_leader. The ptracer should | 1845 | * the real_parent of the group_leader. The ptracer should |
1845 | * know about every stop while the real parent is only | 1846 | * know about every stop while the real parent is only |
1846 | * interested in the completion of group stop. The states | 1847 | * interested in the completion of group stop. The states |
1847 | * for the two don't interact with each other. Notify | 1848 | * for the two don't interact with each other. Notify |
1848 | * separately unless they're gonna be duplicates. | 1849 | * separately unless they're gonna be duplicates. |
1849 | */ | 1850 | */ |
1850 | do_notify_parent_cldstop(current, true, why); | 1851 | do_notify_parent_cldstop(current, true, why); |
1851 | if (gstop_done && !real_parent_is_ptracer(current)) | 1852 | if (gstop_done && !real_parent_is_ptracer(current)) |
1852 | do_notify_parent_cldstop(current, false, why); | 1853 | do_notify_parent_cldstop(current, false, why); |
1853 | 1854 | ||
1854 | /* | 1855 | /* |
1855 | * Don't want to allow preemption here, because | 1856 | * Don't want to allow preemption here, because |
1856 | * sys_ptrace() needs this task to be inactive. | 1857 | * sys_ptrace() needs this task to be inactive. |
1857 | * | 1858 | * |
1858 | * XXX: implement read_unlock_no_resched(). | 1859 | * XXX: implement read_unlock_no_resched(). |
1859 | */ | 1860 | */ |
1860 | preempt_disable(); | 1861 | preempt_disable(); |
1861 | read_unlock(&tasklist_lock); | 1862 | read_unlock(&tasklist_lock); |
1862 | preempt_enable_no_resched(); | 1863 | preempt_enable_no_resched(); |
1863 | schedule(); | 1864 | schedule(); |
1864 | } else { | 1865 | } else { |
1865 | /* | 1866 | /* |
1866 | * By the time we got the lock, our tracer went away. | 1867 | * By the time we got the lock, our tracer went away. |
1867 | * Don't drop the lock yet, another tracer may come. | 1868 | * Don't drop the lock yet, another tracer may come. |
1868 | * | 1869 | * |
1869 | * If @gstop_done, the ptracer went away between group stop | 1870 | * If @gstop_done, the ptracer went away between group stop |
1870 | * completion and here. During detach, it would have set | 1871 | * completion and here. During detach, it would have set |
1871 | * JOBCTL_STOP_PENDING on us and we'll re-enter | 1872 | * JOBCTL_STOP_PENDING on us and we'll re-enter |
1872 | * TASK_STOPPED in do_signal_stop() on return, so notifying | 1873 | * TASK_STOPPED in do_signal_stop() on return, so notifying |
1873 | * the real parent of the group stop completion is enough. | 1874 | * the real parent of the group stop completion is enough. |
1874 | */ | 1875 | */ |
1875 | if (gstop_done) | 1876 | if (gstop_done) |
1876 | do_notify_parent_cldstop(current, false, why); | 1877 | do_notify_parent_cldstop(current, false, why); |
1877 | 1878 | ||
1878 | __set_current_state(TASK_RUNNING); | 1879 | __set_current_state(TASK_RUNNING); |
1879 | if (clear_code) | 1880 | if (clear_code) |
1880 | current->exit_code = 0; | 1881 | current->exit_code = 0; |
1881 | read_unlock(&tasklist_lock); | 1882 | read_unlock(&tasklist_lock); |
1882 | } | 1883 | } |
1883 | 1884 | ||
1884 | /* | 1885 | /* |
1885 | * While in TASK_TRACED, we were considered "frozen enough". | 1886 | * While in TASK_TRACED, we were considered "frozen enough". |
1886 | * Now that we are awake, it is crucial that, if we are supposed | 1887 | * Now that we are awake, it is crucial that, if we are supposed |
1887 | * to be frozen, we freeze now before running anything substantial. | 1888 | * to be frozen, we freeze now before running anything substantial. |
1888 | */ | 1889 | */ |
1889 | try_to_freeze(); | 1890 | try_to_freeze(); |
1890 | 1891 | ||
1891 | /* | 1892 | /* |
1892 | * We are back. Now reacquire the siglock before touching | 1893 | * We are back. Now reacquire the siglock before touching |
1893 | * last_siginfo, so that we are sure to have synchronized with | 1894 | * last_siginfo, so that we are sure to have synchronized with |
1894 | * any signal-sending on another CPU that wants to examine it. | 1895 | * any signal-sending on another CPU that wants to examine it. |
1895 | */ | 1896 | */ |
1896 | spin_lock_irq(¤t->sighand->siglock); | 1897 | spin_lock_irq(¤t->sighand->siglock); |
1897 | current->last_siginfo = NULL; | 1898 | current->last_siginfo = NULL; |
1898 | 1899 | ||
1899 | /* LISTENING can be set only during STOP traps, clear it */ | 1900 | /* LISTENING can be set only during STOP traps, clear it */ |
1900 | current->jobctl &= ~JOBCTL_LISTENING; | 1901 | current->jobctl &= ~JOBCTL_LISTENING; |
1901 | 1902 | ||
1902 | /* | 1903 | /* |
1903 | * Queued signals ignored us while we were stopped for tracing. | 1904 | * Queued signals ignored us while we were stopped for tracing. |
1904 | * So check for any that we should take before resuming user mode. | 1905 | * So check for any that we should take before resuming user mode. |
1905 | * This sets TIF_SIGPENDING, but never clears it. | 1906 | * This sets TIF_SIGPENDING, but never clears it. |
1906 | */ | 1907 | */ |
1907 | recalc_sigpending_tsk(current); | 1908 | recalc_sigpending_tsk(current); |
1908 | } | 1909 | } |
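
From the tracer's side, the TASK_TRACED stop entered above surfaces as a waitpid() stop notification. A hedged sketch of the minimal attach/observe/detach cycle (assumes we are permitted to trace pid):

	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/wait.h>

	int trace_once(pid_t pid)
	{
		int status;

		if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
			return -1;
		waitpid(pid, &status, 0);	/* tracee sits in ptrace_stop() */
		if (WIFSTOPPED(status))
			printf("tracee stopped by signal %d\n", WSTOPSIG(status));
		return (int)ptrace(PTRACE_DETACH, pid, NULL, NULL);
	}
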
1909 | 1910 | ||
1910 | static void ptrace_do_notify(int signr, int exit_code, int why) | 1911 | static void ptrace_do_notify(int signr, int exit_code, int why) |
1911 | { | 1912 | { |
1912 | siginfo_t info; | 1913 | siginfo_t info; |
1913 | 1914 | ||
1914 | memset(&info, 0, sizeof info); | 1915 | memset(&info, 0, sizeof info); |
1915 | info.si_signo = signr; | 1916 | info.si_signo = signr; |
1916 | info.si_code = exit_code; | 1917 | info.si_code = exit_code; |
1917 | info.si_pid = task_pid_vnr(current); | 1918 | info.si_pid = task_pid_vnr(current); |
1918 | info.si_uid = current_uid(); | 1919 | info.si_uid = current_uid(); |
1919 | 1920 | ||
1920 | /* Let the debugger run. */ | 1921 | /* Let the debugger run. */ |
1921 | ptrace_stop(exit_code, why, 1, &info); | 1922 | ptrace_stop(exit_code, why, 1, &info); |
1922 | } | 1923 | } |
1923 | 1924 | ||
1924 | void ptrace_notify(int exit_code) | 1925 | void ptrace_notify(int exit_code) |
1925 | { | 1926 | { |
1926 | BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); | 1927 | BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); |
1927 | 1928 | ||
1928 | spin_lock_irq(¤t->sighand->siglock); | 1929 | spin_lock_irq(¤t->sighand->siglock); |
1929 | ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED); | 1930 | ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED); |
1930 | spin_unlock_irq(¤t->sighand->siglock); | 1931 | spin_unlock_irq(¤t->sighand->siglock); |
1931 | } | 1932 | } |
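
The BUG_ON() in ptrace_notify() encodes the invariant that an event exit_code is SIGTRAP in the low bits with the event number in bits 8-15 and nothing above bit 15. A hedged restatement in plain C:

	#include <signal.h>

	/* mirrors BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP): the low
	 * seven bits must be exactly SIGTRAP and bits 16 and up must be clear */
	static inline int valid_ptrace_notify_code(int exit_code)
	{
		return (exit_code & (0x7f | ~0xffff)) == SIGTRAP;
	}

	/* e.g. ((PTRACE_EVENT_FORK << 8) | SIGTRAP) passes; this is the same
	 * value a tracer later extracts from the wait status via status >> 8 */
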
1932 | 1933 | ||
1933 | /** | 1934 | /** |
1934 | * do_signal_stop - handle group stop for SIGSTOP and other stop signals | 1935 | * do_signal_stop - handle group stop for SIGSTOP and other stop signals |
1935 | * @signr: signr causing group stop if initiating | 1936 | * @signr: signr causing group stop if initiating |
1936 | * | 1937 | * |
1937 | * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr | 1938 | * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr |
1938 | * and participate in it. If already set, participate in the existing | 1939 | * and participate in it. If already set, participate in the existing |
1939 | * group stop. If participated in a group stop (and thus slept), %true is | 1940 | * group stop. If participated in a group stop (and thus slept), %true is |
1940 | * returned with siglock released. | 1941 | * returned with siglock released. |
1941 | * | 1942 | * |
1942 | * If ptraced, this function doesn't handle stop itself. Instead, | 1943 | * If ptraced, this function doesn't handle stop itself. Instead, |
1943 | * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock | 1944 | * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock |
1944 | * untouched. The caller must ensure that INTERRUPT trap handling takes | 1945 | * untouched. The caller must ensure that INTERRUPT trap handling takes |
1945 | * place afterwards. | 1946 | * place afterwards. |
1946 | * | 1947 | * |
1947 | * CONTEXT: | 1948 | * CONTEXT: |
1948 | * Must be called with @current->sighand->siglock held, which is released | 1949 | * Must be called with @current->sighand->siglock held, which is released |
1949 | * on %true return. | 1950 | * on %true return. |
1950 | * | 1951 | * |
1951 | * RETURNS: | 1952 | * RETURNS: |
1952 | * %false if group stop is already cancelled or ptrace trap is scheduled. | 1953 | * %false if group stop is already cancelled or ptrace trap is scheduled. |
1953 | * %true if participated in group stop. | 1954 | * %true if participated in group stop. |
1954 | */ | 1955 | */ |
1955 | static bool do_signal_stop(int signr) | 1956 | static bool do_signal_stop(int signr) |
1956 | __releases(&current->sighand->siglock) | 1957 | __releases(&current->sighand->siglock) |
1957 | { | 1958 | { |
1958 | struct signal_struct *sig = current->signal; | 1959 | struct signal_struct *sig = current->signal; |
1959 | 1960 | ||
1960 | if (!(current->jobctl & JOBCTL_STOP_PENDING)) { | 1961 | if (!(current->jobctl & JOBCTL_STOP_PENDING)) { |
1961 | unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME; | 1962 | unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME; |
1962 | struct task_struct *t; | 1963 | struct task_struct *t; |
1963 | 1964 | ||
1964 | /* signr will be recorded in task->jobctl for retries */ | 1965 | /* signr will be recorded in task->jobctl for retries */ |
1965 | WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK); | 1966 | WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK); |
1966 | 1967 | ||
1967 | if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) || | 1968 | if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) || |
1968 | unlikely(signal_group_exit(sig))) | 1969 | unlikely(signal_group_exit(sig))) |
1969 | return false; | 1970 | return false; |
1970 | /* | 1971 | /* |
1971 | * There is no group stop already in progress. We must | 1972 | * There is no group stop already in progress. We must |
1972 | * initiate one now. | 1973 | * initiate one now. |
1973 | * | 1974 | * |
1974 | * While ptraced, a task may be resumed while group stop is | 1975 | * While ptraced, a task may be resumed while group stop is |
1975 | * still in effect and then receive a stop signal and | 1976 | * still in effect and then receive a stop signal and |
1976 | * initiate another group stop. This deviates from the | 1977 | * initiate another group stop. This deviates from the |
1977 | * usual behavior as two consecutive stop signals can't | 1978 | * usual behavior as two consecutive stop signals can't |
1978 | * cause two group stops when !ptraced. That is why we | 1979 | * cause two group stops when !ptraced. That is why we |
1979 | * also check !task_is_stopped(t) below. | 1980 | * also check !task_is_stopped(t) below. |
1980 | * | 1981 | * |
1981 | * The condition can be distinguished by testing whether | 1982 | * The condition can be distinguished by testing whether |
1982 | * SIGNAL_STOP_STOPPED is already set. Don't generate | 1983 | * SIGNAL_STOP_STOPPED is already set. Don't generate |
1983 | * group_exit_code in such case. | 1984 | * group_exit_code in such case. |
1984 | * | 1985 | * |
1985 | * This is not necessary for SIGNAL_STOP_CONTINUED because | 1986 | * This is not necessary for SIGNAL_STOP_CONTINUED because |
1986 | * an intervening stop signal is required to cause two | 1987 | * an intervening stop signal is required to cause two |
1987 | * continued events regardless of ptrace. | 1988 | * continued events regardless of ptrace. |
1988 | */ | 1989 | */ |
1989 | if (!(sig->flags & SIGNAL_STOP_STOPPED)) | 1990 | if (!(sig->flags & SIGNAL_STOP_STOPPED)) |
1990 | sig->group_exit_code = signr; | 1991 | sig->group_exit_code = signr; |
1991 | else | 1992 | else |
1992 | WARN_ON_ONCE(!current->ptrace); | 1993 | WARN_ON_ONCE(!current->ptrace); |
1993 | 1994 | ||
1994 | sig->group_stop_count = 0; | 1995 | sig->group_stop_count = 0; |
1995 | 1996 | ||
1996 | if (task_set_jobctl_pending(current, signr | gstop)) | 1997 | if (task_set_jobctl_pending(current, signr | gstop)) |
1997 | sig->group_stop_count++; | 1998 | sig->group_stop_count++; |
1998 | 1999 | ||
1999 | for (t = next_thread(current); t != current; | 2000 | for (t = next_thread(current); t != current; |
2000 | t = next_thread(t)) { | 2001 | t = next_thread(t)) { |
2001 | /* | 2002 | /* |
2002 | * Setting state to TASK_STOPPED for a group | 2003 | * Setting state to TASK_STOPPED for a group |
2003 | * stop is always done with the siglock held, | 2004 | * stop is always done with the siglock held, |
2004 | * so this check has no races. | 2005 | * so this check has no races. |
2005 | */ | 2006 | */ |
2006 | if (!task_is_stopped(t) && | 2007 | if (!task_is_stopped(t) && |
2007 | task_set_jobctl_pending(t, signr | gstop)) { | 2008 | task_set_jobctl_pending(t, signr | gstop)) { |
2008 | sig->group_stop_count++; | 2009 | sig->group_stop_count++; |
2009 | if (likely(!(t->ptrace & PT_SEIZED))) | 2010 | if (likely(!(t->ptrace & PT_SEIZED))) |
2010 | signal_wake_up(t, 0); | 2011 | signal_wake_up(t, 0); |
2011 | else | 2012 | else |
2012 | ptrace_trap_notify(t); | 2013 | ptrace_trap_notify(t); |
2013 | } | 2014 | } |
2014 | } | 2015 | } |
2015 | } | 2016 | } |
2016 | 2017 | ||
2017 | if (likely(!current->ptrace)) { | 2018 | if (likely(!current->ptrace)) { |
2018 | int notify = 0; | 2019 | int notify = 0; |
2019 | 2020 | ||
2020 | /* | 2021 | /* |
2021 | * If there are no other threads in the group, or if there | 2022 | * If there are no other threads in the group, or if there |
2022 | * is a group stop in progress and we are the last to stop, | 2023 | * is a group stop in progress and we are the last to stop, |
2023 | * report to the parent. | 2024 | * report to the parent. |
2024 | */ | 2025 | */ |
2025 | if (task_participate_group_stop(current)) | 2026 | if (task_participate_group_stop(current)) |
2026 | notify = CLD_STOPPED; | 2027 | notify = CLD_STOPPED; |
2027 | 2028 | ||
2028 | __set_current_state(TASK_STOPPED); | 2029 | __set_current_state(TASK_STOPPED); |
2029 | spin_unlock_irq(¤t->sighand->siglock); | 2030 | spin_unlock_irq(¤t->sighand->siglock); |
2030 | 2031 | ||
2031 | /* | 2032 | /* |
2032 | * Notify the parent of the group stop completion. Because | 2033 | * Notify the parent of the group stop completion. Because |
2033 | * we're not holding either the siglock or tasklist_lock | 2034 | * we're not holding either the siglock or tasklist_lock |
2034 | * here, a ptracer may attach in between; however, this is for | 2035 | * here, a ptracer may attach in between; however, this is for |
2035 | * group stop and should always be delivered to the real | 2036 | * group stop and should always be delivered to the real |
2036 | * parent of the group leader. The new ptracer will get | 2037 | * parent of the group leader. The new ptracer will get |
2037 | * its notification when this task transitions into | 2038 | * its notification when this task transitions into |
2038 | * TASK_TRACED. | 2039 | * TASK_TRACED. |
2039 | */ | 2040 | */ |
2040 | if (notify) { | 2041 | if (notify) { |
2041 | read_lock(&tasklist_lock); | 2042 | read_lock(&tasklist_lock); |
2042 | do_notify_parent_cldstop(current, false, notify); | 2043 | do_notify_parent_cldstop(current, false, notify); |
2043 | read_unlock(&tasklist_lock); | 2044 | read_unlock(&tasklist_lock); |
2044 | } | 2045 | } |
2045 | 2046 | ||
2046 | /* Now we don't run again until woken by SIGCONT or SIGKILL */ | 2047 | /* Now we don't run again until woken by SIGCONT or SIGKILL */ |
2047 | schedule(); | 2048 | schedule(); |
2048 | return true; | 2049 | return true; |
2049 | } else { | 2050 | } else { |
2050 | /* | 2051 | /* |
2051 | * While ptraced, group stop is handled by STOP trap. | 2052 | * While ptraced, group stop is handled by STOP trap. |
2052 | * Schedule it and let the caller deal with it. | 2053 | * Schedule it and let the caller deal with it. |
2053 | */ | 2054 | */ |
2054 | task_set_jobctl_pending(current, JOBCTL_TRAP_STOP); | 2055 | task_set_jobctl_pending(current, JOBCTL_TRAP_STOP); |
2055 | return false; | 2056 | return false; |
2056 | } | 2057 | } |
2057 | } | 2058 | } |
2058 | 2059 | ||
2059 | /** | 2060 | /** |
2060 | * do_jobctl_trap - take care of ptrace jobctl traps | 2061 | * do_jobctl_trap - take care of ptrace jobctl traps |
2061 | * | 2062 | * |
2062 | * When PT_SEIZED, it's used for both group stop and explicit | 2063 | * When PT_SEIZED, it's used for both group stop and explicit |
2063 | * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with | 2064 | * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with |
2064 | * accompanying siginfo. If stopped, lower eight bits of exit_code contain | 2065 | * accompanying siginfo. If stopped, lower eight bits of exit_code contain |
2065 | * the stop signal; otherwise, %SIGTRAP. | 2066 | * the stop signal; otherwise, %SIGTRAP. |
2066 | * | 2067 | * |
2067 | * When !PT_SEIZED, it's used only for group stop trap with stop signal | 2068 | * When !PT_SEIZED, it's used only for group stop trap with stop signal |
2068 | * number as exit_code and no siginfo. | 2069 | * number as exit_code and no siginfo. |
2069 | * | 2070 | * |
2070 | * CONTEXT: | 2071 | * CONTEXT: |
2071 | * Must be called with @current->sighand->siglock held, which may be | 2072 | * Must be called with @current->sighand->siglock held, which may be |
2072 | * released and re-acquired before returning with intervening sleep. | 2073 | * released and re-acquired before returning with intervening sleep. |
2073 | */ | 2074 | */ |
2074 | static void do_jobctl_trap(void) | 2075 | static void do_jobctl_trap(void) |
2075 | { | 2076 | { |
2076 | struct signal_struct *signal = current->signal; | 2077 | struct signal_struct *signal = current->signal; |
2077 | int signr = current->jobctl & JOBCTL_STOP_SIGMASK; | 2078 | int signr = current->jobctl & JOBCTL_STOP_SIGMASK; |
2078 | 2079 | ||
2079 | if (current->ptrace & PT_SEIZED) { | 2080 | if (current->ptrace & PT_SEIZED) { |
2080 | if (!signal->group_stop_count && | 2081 | if (!signal->group_stop_count && |
2081 | !(signal->flags & SIGNAL_STOP_STOPPED)) | 2082 | !(signal->flags & SIGNAL_STOP_STOPPED)) |
2082 | signr = SIGTRAP; | 2083 | signr = SIGTRAP; |
2083 | WARN_ON_ONCE(!signr); | 2084 | WARN_ON_ONCE(!signr); |
2084 | ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8), | 2085 | ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8), |
2085 | CLD_STOPPED); | 2086 | CLD_STOPPED); |
2086 | } else { | 2087 | } else { |
2087 | WARN_ON_ONCE(!signr); | 2088 | WARN_ON_ONCE(!signr); |
2088 | ptrace_stop(signr, CLD_STOPPED, 0, NULL); | 2089 | ptrace_stop(signr, CLD_STOPPED, 0, NULL); |
2089 | current->exit_code = 0; | 2090 | current->exit_code = 0; |
2090 | } | 2091 | } |
2091 | } | 2092 | } |
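
Seen from a PTRACE_SEIZE tracer, the PTRACE_EVENT_STOP trap generated here is distinguishable from an ordinary signal-delivery stop by the event bits of the waitpid() status. A sketch under that assumption (is_group_stop() is a hypothetical helper name; PTRACE_EVENT_STOP may need <linux/ptrace.h> on older libcs):

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Hypothetical helper: true if the stop reported by waitpid() is a
 * group-stop/INTERRUPT trap from a PTRACE_SEIZE'd tracee rather
 * than a signal-delivery stop.  WSTOPSIG(status) then yields the
 * stop signal, or SIGTRAP for an explicit INTERRUPT. */
static int is_group_stop(int status)
{
        return WIFSTOPPED(status) &&
               (status >> 16) == PTRACE_EVENT_STOP;
}
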
2092 | 2093 | ||
2093 | static int ptrace_signal(int signr, siginfo_t *info, | 2094 | static int ptrace_signal(int signr, siginfo_t *info, |
2094 | struct pt_regs *regs, void *cookie) | 2095 | struct pt_regs *regs, void *cookie) |
2095 | { | 2096 | { |
2096 | if (!current->ptrace) | 2097 | if (!current->ptrace) |
2097 | return signr; | 2098 | return signr; |
2098 | 2099 | ||
2099 | ptrace_signal_deliver(regs, cookie); | 2100 | ptrace_signal_deliver(regs, cookie); |
2100 | 2101 | ||
2101 | /* Let the debugger run. */ | 2102 | /* Let the debugger run. */ |
2102 | ptrace_stop(signr, CLD_TRAPPED, 0, info); | 2103 | ptrace_stop(signr, CLD_TRAPPED, 0, info); |
2103 | 2104 | ||
2104 | /* We're back. Did the debugger cancel the sig? */ | 2105 | /* We're back. Did the debugger cancel the sig? */ |
2105 | signr = current->exit_code; | 2106 | signr = current->exit_code; |
2106 | if (signr == 0) | 2107 | if (signr == 0) |
2107 | return signr; | 2108 | return signr; |
2108 | 2109 | ||
2109 | current->exit_code = 0; | 2110 | current->exit_code = 0; |
2110 | 2111 | ||
2111 | /* | 2112 | /* |
2112 | * Update the siginfo structure if the signal has | 2113 | * Update the siginfo structure if the signal has |
2113 | * changed. If the debugger wanted something | 2114 | * changed. If the debugger wanted something |
2114 | * specific in the siginfo structure then it should | 2115 | * specific in the siginfo structure then it should |
2115 | * have updated *info via PTRACE_SETSIGINFO. | 2116 | * have updated *info via PTRACE_SETSIGINFO. |
2116 | */ | 2117 | */ |
2117 | if (signr != info->si_signo) { | 2118 | if (signr != info->si_signo) { |
2118 | info->si_signo = signr; | 2119 | info->si_signo = signr; |
2119 | info->si_errno = 0; | 2120 | info->si_errno = 0; |
2120 | info->si_code = SI_USER; | 2121 | info->si_code = SI_USER; |
2121 | info->si_pid = task_pid_vnr(current->parent); | 2122 | info->si_pid = task_pid_vnr(current->parent); |
2122 | info->si_uid = task_uid(current->parent); | 2123 | info->si_uid = task_uid(current->parent); |
2123 | } | 2124 | } |
2124 | 2125 | ||
2125 | /* If the (new) signal is now blocked, requeue it. */ | 2126 | /* If the (new) signal is now blocked, requeue it. */ |
2126 | if (sigismember(&current->blocked, signr)) { | 2127 | if (sigismember(&current->blocked, signr)) { |
2127 | specific_send_sig_info(signr, info, current); | 2128 | specific_send_sig_info(signr, info, current); |
2128 | signr = 0; | 2129 | signr = 0; |
2129 | } | 2130 | } |
2130 | 2131 | ||
2131 | return signr; | 2132 | return signr; |
2132 | } | 2133 | } |
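
The exit_code that ptrace_signal() reads back is exactly the data argument the tracer passed when resuming the tracee: zero suppresses the signal, any other value replaces it. A hedged tracer-side sketch (resume_tracee() is a hypothetical helper); a debugger wanting a different siginfo as well would pair this with PTRACE_SETSIGINFO, as the comment above notes:

#include <sys/ptrace.h>
#include <sys/types.h>
#include <signal.h>

/* Hypothetical helper: after a signal-delivery stop, the data
 * argument of PTRACE_CONT becomes the tracee's exit_code, which
 * ptrace_signal() reads back.  0 cancels the signal; any other
 * value is delivered in place of the original. */
static void resume_tracee(pid_t pid, int sig)
{
        ptrace(PTRACE_CONT, pid, 0, sig);       /* sig == 0 suppresses */
}
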
2133 | 2134 | ||
2134 | int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, | 2135 | int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, |
2135 | struct pt_regs *regs, void *cookie) | 2136 | struct pt_regs *regs, void *cookie) |
2136 | { | 2137 | { |
2137 | struct sighand_struct *sighand = current->sighand; | 2138 | struct sighand_struct *sighand = current->sighand; |
2138 | struct signal_struct *signal = current->signal; | 2139 | struct signal_struct *signal = current->signal; |
2139 | int signr; | 2140 | int signr; |
2140 | 2141 | ||
2141 | relock: | 2142 | relock: |
2142 | /* | 2143 | /* |
2143 | * We'll jump back here any time we were stopped in TASK_STOPPED. | 2144 | * We'll jump back here any time we were stopped in TASK_STOPPED. |
2144 | * While in TASK_STOPPED, we were considered "frozen enough". | 2145 | * While in TASK_STOPPED, we were considered "frozen enough". |
2145 | * Now that we woke up, it's crucial if we're supposed to be | 2146 | * Now that we woke up, it's crucial if we're supposed to be |
2146 | * frozen that we freeze now before running anything substantial. | 2147 | * frozen that we freeze now before running anything substantial. |
2147 | */ | 2148 | */ |
2148 | try_to_freeze(); | 2149 | try_to_freeze(); |
2149 | 2150 | ||
2150 | spin_lock_irq(&sighand->siglock); | 2151 | spin_lock_irq(&sighand->siglock); |
2151 | /* | 2152 | /* |
2152 | * Every stopped thread goes here after wakeup. Check to see if | 2153 | * Every stopped thread goes here after wakeup. Check to see if |
2152 | * we should notify the parent; prepare_signal(SIGCONT) encodes | 2153 | * we should notify the parent; prepare_signal(SIGCONT) encodes |
2154 | * the CLD_ si_code into SIGNAL_CLD_MASK bits. | 2155 | * the CLD_ si_code into SIGNAL_CLD_MASK bits. |
2155 | */ | 2156 | */ |
2156 | if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { | 2157 | if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { |
2157 | struct task_struct *leader; | 2158 | struct task_struct *leader; |
2158 | int why; | 2159 | int why; |
2159 | 2160 | ||
2160 | if (signal->flags & SIGNAL_CLD_CONTINUED) | 2161 | if (signal->flags & SIGNAL_CLD_CONTINUED) |
2161 | why = CLD_CONTINUED; | 2162 | why = CLD_CONTINUED; |
2162 | else | 2163 | else |
2163 | why = CLD_STOPPED; | 2164 | why = CLD_STOPPED; |
2164 | 2165 | ||
2165 | signal->flags &= ~SIGNAL_CLD_MASK; | 2166 | signal->flags &= ~SIGNAL_CLD_MASK; |
2166 | 2167 | ||
2167 | spin_unlock_irq(&sighand->siglock); | 2168 | spin_unlock_irq(&sighand->siglock); |
2168 | 2169 | ||
2169 | /* | 2170 | /* |
2170 | * Notify the parent that we're continuing. This event is | 2171 | * Notify the parent that we're continuing. This event is |
2171 | * always per-process and doesn't make a whole lot of sense | 2172 | * always per-process and doesn't make a whole lot of sense |
2172 | * for ptracers, who shouldn't consume the state via | 2173 | * for ptracers, who shouldn't consume the state via |
2173 | * wait(2) either, but, for backward compatibility, notify | 2174 | * wait(2) either, but, for backward compatibility, notify |
2174 | * the ptracer of the group leader too unless it's gonna be | 2175 | * the ptracer of the group leader too unless it's gonna be |
2175 | * a duplicate. | 2176 | * a duplicate. |
2176 | */ | 2177 | */ |
2177 | read_lock(&tasklist_lock); | 2178 | read_lock(&tasklist_lock); |
2178 | 2179 | ||
2179 | do_notify_parent_cldstop(current, false, why); | 2180 | do_notify_parent_cldstop(current, false, why); |
2180 | 2181 | ||
2181 | leader = current->group_leader; | 2182 | leader = current->group_leader; |
2182 | if (leader->ptrace && !real_parent_is_ptracer(leader)) | 2183 | if (leader->ptrace && !real_parent_is_ptracer(leader)) |
2183 | do_notify_parent_cldstop(leader, true, why); | 2184 | do_notify_parent_cldstop(leader, true, why); |
2184 | 2185 | ||
2185 | read_unlock(&tasklist_lock); | 2186 | read_unlock(&tasklist_lock); |
2186 | 2187 | ||
2187 | goto relock; | 2188 | goto relock; |
2188 | } | 2189 | } |
2189 | 2190 | ||
2190 | for (;;) { | 2191 | for (;;) { |
2191 | struct k_sigaction *ka; | 2192 | struct k_sigaction *ka; |
2192 | 2193 | ||
2193 | if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) && | 2194 | if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) && |
2194 | do_signal_stop(0)) | 2195 | do_signal_stop(0)) |
2195 | goto relock; | 2196 | goto relock; |
2196 | 2197 | ||
2197 | if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) { | 2198 | if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) { |
2198 | do_jobctl_trap(); | 2199 | do_jobctl_trap(); |
2199 | spin_unlock_irq(&sighand->siglock); | 2200 | spin_unlock_irq(&sighand->siglock); |
2200 | goto relock; | 2201 | goto relock; |
2201 | } | 2202 | } |
2202 | 2203 | ||
2203 | signr = dequeue_signal(current, &current->blocked, info); | 2204 | signr = dequeue_signal(current, &current->blocked, info); |
2204 | 2205 | ||
2205 | if (!signr) | 2206 | if (!signr) |
2206 | break; /* will return 0 */ | 2207 | break; /* will return 0 */ |
2207 | 2208 | ||
2208 | if (signr != SIGKILL) { | 2209 | if (signr != SIGKILL) { |
2209 | signr = ptrace_signal(signr, info, | 2210 | signr = ptrace_signal(signr, info, |
2210 | regs, cookie); | 2211 | regs, cookie); |
2211 | if (!signr) | 2212 | if (!signr) |
2212 | continue; | 2213 | continue; |
2213 | } | 2214 | } |
2214 | 2215 | ||
2215 | ka = &sighand->action[signr-1]; | 2216 | ka = &sighand->action[signr-1]; |
2216 | 2217 | ||
2217 | /* Trace actually delivered signals. */ | 2218 | /* Trace actually delivered signals. */ |
2218 | trace_signal_deliver(signr, info, ka); | 2219 | trace_signal_deliver(signr, info, ka); |
2219 | 2220 | ||
2220 | if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ | 2221 | if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ |
2221 | continue; | 2222 | continue; |
2222 | if (ka->sa.sa_handler != SIG_DFL) { | 2223 | if (ka->sa.sa_handler != SIG_DFL) { |
2223 | /* Run the handler. */ | 2224 | /* Run the handler. */ |
2224 | *return_ka = *ka; | 2225 | *return_ka = *ka; |
2225 | 2226 | ||
2226 | if (ka->sa.sa_flags & SA_ONESHOT) | 2227 | if (ka->sa.sa_flags & SA_ONESHOT) |
2227 | ka->sa.sa_handler = SIG_DFL; | 2228 | ka->sa.sa_handler = SIG_DFL; |
2228 | 2229 | ||
2229 | break; /* will return non-zero "signr" value */ | 2230 | break; /* will return non-zero "signr" value */ |
2230 | } | 2231 | } |
2231 | 2232 | ||
2232 | /* | 2233 | /* |
2233 | * Now we are doing the default action for this signal. | 2234 | * Now we are doing the default action for this signal. |
2234 | */ | 2235 | */ |
2235 | if (sig_kernel_ignore(signr)) /* Default is nothing. */ | 2236 | if (sig_kernel_ignore(signr)) /* Default is nothing. */ |
2236 | continue; | 2237 | continue; |
2237 | 2238 | ||
2238 | /* | 2239 | /* |
2239 | * Global init gets no signals it doesn't want. | 2240 | * Global init gets no signals it doesn't want. |
2240 | * Container-init gets no signals it doesn't want from the same | 2241 | * Container-init gets no signals it doesn't want from the same |
2241 | * container. | 2242 | * container. |
2242 | * | 2243 | * |
2243 | * Note that if global/container-init sees a sig_kernel_only() | 2244 | * Note that if global/container-init sees a sig_kernel_only() |
2244 | * signal here, the signal must have been generated internally | 2245 | * signal here, the signal must have been generated internally |
2245 | * or must have come from an ancestor namespace. In either | 2246 | * or must have come from an ancestor namespace. In either |
2246 | * case, the signal cannot be dropped. | 2247 | * case, the signal cannot be dropped. |
2247 | */ | 2248 | */ |
2248 | if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && | 2249 | if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && |
2249 | !sig_kernel_only(signr)) | 2250 | !sig_kernel_only(signr)) |
2250 | continue; | 2251 | continue; |
2251 | 2252 | ||
2252 | if (sig_kernel_stop(signr)) { | 2253 | if (sig_kernel_stop(signr)) { |
2253 | /* | 2254 | /* |
2254 | * The default action is to stop all threads in | 2255 | * The default action is to stop all threads in |
2255 | * the thread group. The job control signals | 2256 | * the thread group. The job control signals |
2256 | * do nothing in an orphaned pgrp, but SIGSTOP | 2257 | * do nothing in an orphaned pgrp, but SIGSTOP |
2257 | * always works. Note that siglock needs to be | 2258 | * always works. Note that siglock needs to be |
2258 | * dropped during the call to is_orphaned_pgrp() | 2259 | * dropped during the call to is_orphaned_pgrp() |
2259 | * because of lock ordering with tasklist_lock. | 2260 | * because of lock ordering with tasklist_lock. |
2260 | * This allows an intervening SIGCONT to be posted. | 2261 | * This allows an intervening SIGCONT to be posted. |
2261 | * We need to check for that and bail out if necessary. | 2262 | * We need to check for that and bail out if necessary. |
2262 | */ | 2263 | */ |
2263 | if (signr != SIGSTOP) { | 2264 | if (signr != SIGSTOP) { |
2264 | spin_unlock_irq(&sighand->siglock); | 2265 | spin_unlock_irq(&sighand->siglock); |
2265 | 2266 | ||
2266 | /* signals can be posted during this window */ | 2267 | /* signals can be posted during this window */ |
2267 | 2268 | ||
2268 | if (is_current_pgrp_orphaned()) | 2269 | if (is_current_pgrp_orphaned()) |
2269 | goto relock; | 2270 | goto relock; |
2270 | 2271 | ||
2271 | spin_lock_irq(&sighand->siglock); | 2272 | spin_lock_irq(&sighand->siglock); |
2272 | } | 2273 | } |
2273 | 2274 | ||
2274 | if (likely(do_signal_stop(info->si_signo))) { | 2275 | if (likely(do_signal_stop(info->si_signo))) { |
2275 | /* It released the siglock. */ | 2276 | /* It released the siglock. */ |
2276 | goto relock; | 2277 | goto relock; |
2277 | } | 2278 | } |
2278 | 2279 | ||
2279 | /* | 2280 | /* |
2280 | * We didn't actually stop, due to a race | 2281 | * We didn't actually stop, due to a race |
2281 | * with SIGCONT or something like that. | 2282 | * with SIGCONT or something like that. |
2282 | */ | 2283 | */ |
2283 | continue; | 2284 | continue; |
2284 | } | 2285 | } |
2285 | 2286 | ||
2286 | spin_unlock_irq(&sighand->siglock); | 2287 | spin_unlock_irq(&sighand->siglock); |
2287 | 2288 | ||
2288 | /* | 2289 | /* |
2289 | * Anything else is fatal, maybe with a core dump. | 2290 | * Anything else is fatal, maybe with a core dump. |
2290 | */ | 2291 | */ |
2291 | current->flags |= PF_SIGNALED; | 2292 | current->flags |= PF_SIGNALED; |
2292 | 2293 | ||
2293 | if (sig_kernel_coredump(signr)) { | 2294 | if (sig_kernel_coredump(signr)) { |
2294 | if (print_fatal_signals) | 2295 | if (print_fatal_signals) |
2295 | print_fatal_signal(regs, info->si_signo); | 2296 | print_fatal_signal(regs, info->si_signo); |
2296 | /* | 2297 | /* |
2297 | * If it was able to dump core, this kills all | 2298 | * If it was able to dump core, this kills all |
2298 | * other threads in the group and synchronizes with | 2299 | * other threads in the group and synchronizes with |
2299 | * their demise. If we lost the race with another | 2300 | * their demise. If we lost the race with another |
2300 | * thread getting here, it set group_exit_code | 2301 | * thread getting here, it set group_exit_code |
2301 | * first and our do_group_exit call below will use | 2302 | * first and our do_group_exit call below will use |
2302 | * that value and ignore the one we pass it. | 2303 | * that value and ignore the one we pass it. |
2303 | */ | 2304 | */ |
2304 | do_coredump(info->si_signo, info->si_signo, regs); | 2305 | do_coredump(info->si_signo, info->si_signo, regs); |
2305 | } | 2306 | } |
2306 | 2307 | ||
2307 | /* | 2308 | /* |
2308 | * Death signals, no core dump. | 2309 | * Death signals, no core dump. |
2309 | */ | 2310 | */ |
2310 | do_group_exit(info->si_signo); | 2311 | do_group_exit(info->si_signo); |
2311 | /* NOTREACHED */ | 2312 | /* NOTREACHED */ |
2312 | } | 2313 | } |
2313 | spin_unlock_irq(&sighand->siglock); | 2314 | spin_unlock_irq(&sighand->siglock); |
2314 | return signr; | 2315 | return signr; |
2315 | } | 2316 | } |
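
The SA_ONESHOT branch above is what userspace requests with SA_RESETHAND (the two flags are the same bit): the disposition reverts to SIG_DFL before the handler runs. A minimal sketch:

#include <signal.h>
#include <string.h>

static void once(int sig)
{
        /* runs for the first SIGUSR1 only; the disposition is
         * already back to SIG_DFL by the time this executes */
        (void)sig;
}

static void install_oneshot_handler(void)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = once;
        sa.sa_flags = SA_RESETHAND;     /* aka SA_ONESHOT */
        sigaction(SIGUSR1, &sa, NULL);
}
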
2316 | 2317 | ||
2317 | /* | 2318 | /* |
2318 | * It could be that complete_signal() picked us to notify about the | 2319 | * It could be that complete_signal() picked us to notify about the |
2319 | * group-wide signal. Other threads should be notified now to take | 2320 | * group-wide signal. Other threads should be notified now to take |
2320 | * the shared signals in @which since we will not. | 2321 | * the shared signals in @which since we will not. |
2321 | */ | 2322 | */ |
2322 | static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) | 2323 | static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) |
2323 | { | 2324 | { |
2324 | sigset_t retarget; | 2325 | sigset_t retarget; |
2325 | struct task_struct *t; | 2326 | struct task_struct *t; |
2326 | 2327 | ||
2327 | sigandsets(&retarget, &tsk->signal->shared_pending.signal, which); | 2328 | sigandsets(&retarget, &tsk->signal->shared_pending.signal, which); |
2328 | if (sigisemptyset(&retarget)) | 2329 | if (sigisemptyset(&retarget)) |
2329 | return; | 2330 | return; |
2330 | 2331 | ||
2331 | t = tsk; | 2332 | t = tsk; |
2332 | while_each_thread(tsk, t) { | 2333 | while_each_thread(tsk, t) { |
2333 | if (t->flags & PF_EXITING) | 2334 | if (t->flags & PF_EXITING) |
2334 | continue; | 2335 | continue; |
2335 | 2336 | ||
2336 | if (!has_pending_signals(&retarget, &t->blocked)) | 2337 | if (!has_pending_signals(&retarget, &t->blocked)) |
2337 | continue; | 2338 | continue; |
2338 | /* Remove the signals this thread can handle. */ | 2339 | /* Remove the signals this thread can handle. */ |
2339 | sigandsets(&retarget, &retarget, &t->blocked); | 2340 | sigandsets(&retarget, &retarget, &t->blocked); |
2340 | 2341 | ||
2341 | if (!signal_pending(t)) | 2342 | if (!signal_pending(t)) |
2342 | signal_wake_up(t, 0); | 2343 | signal_wake_up(t, 0); |
2343 | 2344 | ||
2344 | if (sigisemptyset(&retarget)) | 2345 | if (sigisemptyset(&retarget)) |
2345 | break; | 2346 | break; |
2346 | } | 2347 | } |
2347 | } | 2348 | } |
2348 | 2349 | ||
2349 | void exit_signals(struct task_struct *tsk) | 2350 | void exit_signals(struct task_struct *tsk) |
2350 | { | 2351 | { |
2351 | int group_stop = 0; | 2352 | int group_stop = 0; |
2352 | sigset_t unblocked; | 2353 | sigset_t unblocked; |
2353 | 2354 | ||
2354 | if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { | 2355 | if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { |
2355 | tsk->flags |= PF_EXITING; | 2356 | tsk->flags |= PF_EXITING; |
2356 | return; | 2357 | return; |
2357 | } | 2358 | } |
2358 | 2359 | ||
2359 | spin_lock_irq(&tsk->sighand->siglock); | 2360 | spin_lock_irq(&tsk->sighand->siglock); |
2360 | /* | 2361 | /* |
2361 | * From now this task is not visible for group-wide signals, | 2362 | * From now this task is not visible for group-wide signals, |
2362 | * see wants_signal(), do_signal_stop(). | 2363 | * see wants_signal(), do_signal_stop(). |
2363 | */ | 2364 | */ |
2364 | tsk->flags |= PF_EXITING; | 2365 | tsk->flags |= PF_EXITING; |
2365 | if (!signal_pending(tsk)) | 2366 | if (!signal_pending(tsk)) |
2366 | goto out; | 2367 | goto out; |
2367 | 2368 | ||
2368 | unblocked = tsk->blocked; | 2369 | unblocked = tsk->blocked; |
2369 | signotset(&unblocked); | 2370 | signotset(&unblocked); |
2370 | retarget_shared_pending(tsk, &unblocked); | 2371 | retarget_shared_pending(tsk, &unblocked); |
2371 | 2372 | ||
2372 | if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) && | 2373 | if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) && |
2373 | task_participate_group_stop(tsk)) | 2374 | task_participate_group_stop(tsk)) |
2374 | group_stop = CLD_STOPPED; | 2375 | group_stop = CLD_STOPPED; |
2375 | out: | 2376 | out: |
2376 | spin_unlock_irq(&tsk->sighand->siglock); | 2377 | spin_unlock_irq(&tsk->sighand->siglock); |
2377 | 2378 | ||
2378 | /* | 2379 | /* |
2379 | * If group stop has completed, deliver the notification. This | 2380 | * If group stop has completed, deliver the notification. This |
2380 | * should always go to the real parent of the group leader. | 2381 | * should always go to the real parent of the group leader. |
2381 | */ | 2382 | */ |
2382 | if (unlikely(group_stop)) { | 2383 | if (unlikely(group_stop)) { |
2383 | read_lock(&tasklist_lock); | 2384 | read_lock(&tasklist_lock); |
2384 | do_notify_parent_cldstop(tsk, false, group_stop); | 2385 | do_notify_parent_cldstop(tsk, false, group_stop); |
2385 | read_unlock(&tasklist_lock); | 2386 | read_unlock(&tasklist_lock); |
2386 | } | 2387 | } |
2387 | } | 2388 | } |
2388 | 2389 | ||
2389 | EXPORT_SYMBOL(recalc_sigpending); | 2390 | EXPORT_SYMBOL(recalc_sigpending); |
2390 | EXPORT_SYMBOL_GPL(dequeue_signal); | 2391 | EXPORT_SYMBOL_GPL(dequeue_signal); |
2391 | EXPORT_SYMBOL(flush_signals); | 2392 | EXPORT_SYMBOL(flush_signals); |
2392 | EXPORT_SYMBOL(force_sig); | 2393 | EXPORT_SYMBOL(force_sig); |
2393 | EXPORT_SYMBOL(send_sig); | 2394 | EXPORT_SYMBOL(send_sig); |
2394 | EXPORT_SYMBOL(send_sig_info); | 2395 | EXPORT_SYMBOL(send_sig_info); |
2395 | EXPORT_SYMBOL(sigprocmask); | 2396 | EXPORT_SYMBOL(sigprocmask); |
2396 | EXPORT_SYMBOL(block_all_signals); | 2397 | EXPORT_SYMBOL(block_all_signals); |
2397 | EXPORT_SYMBOL(unblock_all_signals); | 2398 | EXPORT_SYMBOL(unblock_all_signals); |
2398 | 2399 | ||
2399 | 2400 | ||
2400 | /* | 2401 | /* |
2401 | * System call entry points. | 2402 | * System call entry points. |
2402 | */ | 2403 | */ |
2403 | 2404 | ||
2404 | /** | 2405 | /** |
2405 | * sys_restart_syscall - restart a system call | 2406 | * sys_restart_syscall - restart a system call |
2406 | */ | 2407 | */ |
2407 | SYSCALL_DEFINE0(restart_syscall) | 2408 | SYSCALL_DEFINE0(restart_syscall) |
2408 | { | 2409 | { |
2409 | struct restart_block *restart = &current_thread_info()->restart_block; | 2410 | struct restart_block *restart = &current_thread_info()->restart_block; |
2410 | return restart->fn(restart); | 2411 | return restart->fn(restart); |
2411 | } | 2412 | } |
2412 | 2413 | ||
2413 | long do_no_restart_syscall(struct restart_block *param) | 2414 | long do_no_restart_syscall(struct restart_block *param) |
2414 | { | 2415 | { |
2415 | return -EINTR; | 2416 | return -EINTR; |
2416 | } | 2417 | } |
2417 | 2418 | ||
2418 | static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) | 2419 | static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) |
2419 | { | 2420 | { |
2420 | if (signal_pending(tsk) && !thread_group_empty(tsk)) { | 2421 | if (signal_pending(tsk) && !thread_group_empty(tsk)) { |
2421 | sigset_t newblocked; | 2422 | sigset_t newblocked; |
2422 | /* A set of now blocked but previously unblocked signals. */ | 2423 | /* A set of now blocked but previously unblocked signals. */ |
2423 | sigandnsets(&newblocked, newset, &current->blocked); | 2424 | sigandnsets(&newblocked, newset, &current->blocked); |
2424 | retarget_shared_pending(tsk, &newblocked); | 2425 | retarget_shared_pending(tsk, &newblocked); |
2425 | } | 2426 | } |
2426 | tsk->blocked = *newset; | 2427 | tsk->blocked = *newset; |
2427 | recalc_sigpending(); | 2428 | recalc_sigpending(); |
2428 | } | 2429 | } |
2429 | 2430 | ||
2430 | /** | 2431 | /** |
2431 | * set_current_blocked - change current->blocked mask | 2432 | * set_current_blocked - change current->blocked mask |
2432 | * @newset: new mask | 2433 | * @newset: new mask |
2433 | * | 2434 | * |
2434 | * It is wrong to change ->blocked directly, this helper should be used | 2435 | * It is wrong to change ->blocked directly, this helper should be used |
2435 | * to ensure the process can't miss a shared signal we are going to block. | 2436 | * to ensure the process can't miss a shared signal we are going to block. |
2436 | */ | 2437 | */ |
2437 | void set_current_blocked(const sigset_t *newset) | 2438 | void set_current_blocked(const sigset_t *newset) |
2438 | { | 2439 | { |
2439 | struct task_struct *tsk = current; | 2440 | struct task_struct *tsk = current; |
2440 | 2441 | ||
2441 | spin_lock_irq(&tsk->sighand->siglock); | 2442 | spin_lock_irq(&tsk->sighand->siglock); |
2442 | __set_task_blocked(tsk, newset); | 2443 | __set_task_blocked(tsk, newset); |
2443 | spin_unlock_irq(&tsk->sighand->siglock); | 2444 | spin_unlock_irq(&tsk->sighand->siglock); |
2444 | } | 2445 | } |
2445 | 2446 | ||
2446 | /* | 2447 | /* |
2447 | * This is also useful for kernel threads that want to temporarily | 2448 | * This is also useful for kernel threads that want to temporarily |
2448 | * (or permanently) block certain signals. | 2449 | * (or permanently) block certain signals. |
2449 | * | 2450 | * |
2450 | * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel | 2451 | * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel |
2451 | * interface happily blocks "unblockable" signals like SIGKILL | 2452 | * interface happily blocks "unblockable" signals like SIGKILL |
2452 | * and friends. | 2453 | * and friends. |
2453 | */ | 2454 | */ |
2454 | int sigprocmask(int how, sigset_t *set, sigset_t *oldset) | 2455 | int sigprocmask(int how, sigset_t *set, sigset_t *oldset) |
2455 | { | 2456 | { |
2456 | struct task_struct *tsk = current; | 2457 | struct task_struct *tsk = current; |
2457 | sigset_t newset; | 2458 | sigset_t newset; |
2458 | 2459 | ||
2459 | /* Lockless, only current can change ->blocked, never from irq */ | 2460 | /* Lockless, only current can change ->blocked, never from irq */ |
2460 | if (oldset) | 2461 | if (oldset) |
2461 | *oldset = tsk->blocked; | 2462 | *oldset = tsk->blocked; |
2462 | 2463 | ||
2463 | switch (how) { | 2464 | switch (how) { |
2464 | case SIG_BLOCK: | 2465 | case SIG_BLOCK: |
2465 | sigorsets(&newset, &tsk->blocked, set); | 2466 | sigorsets(&newset, &tsk->blocked, set); |
2466 | break; | 2467 | break; |
2467 | case SIG_UNBLOCK: | 2468 | case SIG_UNBLOCK: |
2468 | sigandnsets(&newset, &tsk->blocked, set); | 2469 | sigandnsets(&newset, &tsk->blocked, set); |
2469 | break; | 2470 | break; |
2470 | case SIG_SETMASK: | 2471 | case SIG_SETMASK: |
2471 | newset = *set; | 2472 | newset = *set; |
2472 | break; | 2473 | break; |
2473 | default: | 2474 | default: |
2474 | return -EINVAL; | 2475 | return -EINVAL; |
2475 | } | 2476 | } |
2476 | 2477 | ||
2477 | set_current_blocked(&newset); | 2478 | set_current_blocked(&newset); |
2478 | return 0; | 2479 | return 0; |
2479 | } | 2480 | } |
2480 | 2481 | ||
2481 | /** | 2482 | /** |
2482 | * sys_rt_sigprocmask - change the list of currently blocked signals | 2483 | * sys_rt_sigprocmask - change the list of currently blocked signals |
2483 | * @how: whether to add, remove, or set signals | 2484 | * @how: whether to add, remove, or set signals |
2484 | * @nset: new signal mask, or NULL to leave the mask unchanged | 2485 | * @nset: new signal mask, or NULL to leave the mask unchanged |
2485 | * @oset: where to store the previous signal mask, may be NULL | 2486 | * @oset: where to store the previous signal mask, may be NULL |
2486 | * @sigsetsize: size of sigset_t type | 2487 | * @sigsetsize: size of sigset_t type |
2487 | */ | 2488 | */ |
2488 | SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset, | 2489 | SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset, |
2489 | sigset_t __user *, oset, size_t, sigsetsize) | 2490 | sigset_t __user *, oset, size_t, sigsetsize) |
2490 | { | 2491 | { |
2491 | sigset_t old_set, new_set; | 2492 | sigset_t old_set, new_set; |
2492 | int error; | 2493 | int error; |
2493 | 2494 | ||
2494 | /* XXX: Don't preclude handling different sized sigset_t's. */ | 2495 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2495 | if (sigsetsize != sizeof(sigset_t)) | 2496 | if (sigsetsize != sizeof(sigset_t)) |
2496 | return -EINVAL; | 2497 | return -EINVAL; |
2497 | 2498 | ||
2498 | old_set = current->blocked; | 2499 | old_set = current->blocked; |
2499 | 2500 | ||
2500 | if (nset) { | 2501 | if (nset) { |
2501 | if (copy_from_user(&new_set, nset, sizeof(sigset_t))) | 2502 | if (copy_from_user(&new_set, nset, sizeof(sigset_t))) |
2502 | return -EFAULT; | 2503 | return -EFAULT; |
2503 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); | 2504 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
2504 | 2505 | ||
2505 | error = sigprocmask(how, &new_set, NULL); | 2506 | error = sigprocmask(how, &new_set, NULL); |
2506 | if (error) | 2507 | if (error) |
2507 | return error; | 2508 | return error; |
2508 | } | 2509 | } |
2509 | 2510 | ||
2510 | if (oset) { | 2511 | if (oset) { |
2511 | if (copy_to_user(oset, &old_set, sizeof(sigset_t))) | 2512 | if (copy_to_user(oset, &old_set, sizeof(sigset_t))) |
2512 | return -EFAULT; | 2513 | return -EFAULT; |
2513 | } | 2514 | } |
2514 | 2515 | ||
2515 | return 0; | 2516 | return 0; |
2516 | } | 2517 | } |
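
Note that sigdelsetmask() above silently strips SIGKILL and SIGSTOP, so a userspace attempt to block them succeeds without effect. A minimal sketch of the usual glibc route into this syscall (block_sigint() is a hypothetical helper):

#include <signal.h>

/* Hypothetical helper: block SIGINT, saving the old mask so the
 * caller can restore it later with SIG_SETMASK. */
static void block_sigint(sigset_t *saved)
{
        sigset_t set;

        sigemptyset(&set);
        sigaddset(&set, SIGINT);
        sigprocmask(SIG_BLOCK, &set, saved);
}
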
2517 | 2518 | ||
2518 | long do_sigpending(void __user *set, unsigned long sigsetsize) | 2519 | long do_sigpending(void __user *set, unsigned long sigsetsize) |
2519 | { | 2520 | { |
2520 | long error = -EINVAL; | 2521 | long error = -EINVAL; |
2521 | sigset_t pending; | 2522 | sigset_t pending; |
2522 | 2523 | ||
2523 | if (sigsetsize > sizeof(sigset_t)) | 2524 | if (sigsetsize > sizeof(sigset_t)) |
2524 | goto out; | 2525 | goto out; |
2525 | 2526 | ||
2526 | spin_lock_irq(&current->sighand->siglock); | 2527 | spin_lock_irq(&current->sighand->siglock); |
2527 | sigorsets(&pending, &current->pending.signal, | 2528 | sigorsets(&pending, &current->pending.signal, |
2528 | &current->signal->shared_pending.signal); | 2529 | &current->signal->shared_pending.signal); |
2529 | spin_unlock_irq(&current->sighand->siglock); | 2530 | spin_unlock_irq(&current->sighand->siglock); |
2530 | 2531 | ||
2531 | /* Outside the lock because only this thread touches it. */ | 2532 | /* Outside the lock because only this thread touches it. */ |
2532 | sigandsets(&pending, &current->blocked, &pending); | 2533 | sigandsets(&pending, &current->blocked, &pending); |
2533 | 2534 | ||
2534 | error = -EFAULT; | 2535 | error = -EFAULT; |
2535 | if (!copy_to_user(set, &pending, sigsetsize)) | 2536 | if (!copy_to_user(set, &pending, sigsetsize)) |
2536 | error = 0; | 2537 | error = 0; |
2537 | 2538 | ||
2538 | out: | 2539 | out: |
2539 | return error; | 2540 | return error; |
2540 | } | 2541 | } |
2541 | 2542 | ||
2542 | /** | 2543 | /** |
2543 | * sys_rt_sigpending - examine a pending signal that has been raised | 2544 | * sys_rt_sigpending - examine a pending signal that has been raised |
2544 | * while blocked | 2545 | * while blocked |
2545 | * @set: stores pending signals | 2546 | * @set: stores pending signals |
2547 | * @sigsetsize: size of sigset_t type or smaller | 2548 | * @sigsetsize: size of sigset_t type or smaller |
2547 | */ | 2548 | */ |
2548 | SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize) | 2549 | SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize) |
2549 | { | 2550 | { |
2550 | return do_sigpending(set, sigsetsize); | 2551 | return do_sigpending(set, sigsetsize); |
2551 | } | 2552 | } |
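
A companion sketch for this syscall: because do_sigpending() masks the result with ->blocked, only signals that are both pending and blocked are reported. Assuming SIGINT was blocked earlier (e.g. by the block_sigint() sketch above):

#include <signal.h>
#include <stdio.h>

static void report_pending_sigint(void)
{
        sigset_t pending;

        if (sigpending(&pending) == 0 && sigismember(&pending, SIGINT))
                printf("SIGINT arrived while blocked\n");
}
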
2552 | 2553 | ||
2553 | #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER | 2554 | #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER |
2554 | 2555 | ||
2555 | int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) | 2556 | int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) |
2556 | { | 2557 | { |
2557 | int err; | 2558 | int err; |
2558 | 2559 | ||
2559 | if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t))) | 2560 | if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t))) |
2560 | return -EFAULT; | 2561 | return -EFAULT; |
2561 | if (from->si_code < 0) | 2562 | if (from->si_code < 0) |
2562 | return __copy_to_user(to, from, sizeof(siginfo_t)) | 2563 | return __copy_to_user(to, from, sizeof(siginfo_t)) |
2563 | ? -EFAULT : 0; | 2564 | ? -EFAULT : 0; |
2564 | /* | 2565 | /* |
2565 | * If you change siginfo_t structure, please be sure | 2566 | * If you change siginfo_t structure, please be sure |
2566 | * this code is fixed accordingly. | 2567 | * this code is fixed accordingly. |
2567 | * Please remember to update the signalfd_copyinfo() function | 2568 | * Please remember to update the signalfd_copyinfo() function |
2568 | * inside fs/signalfd.c too, in case siginfo_t changes. | 2569 | * inside fs/signalfd.c too, in case siginfo_t changes. |
2569 | * It should never copy any pad contained in the structure | 2570 | * It should never copy any pad contained in the structure |
2570 | * to avoid security leaks, but must copy the generic | 2571 | * to avoid security leaks, but must copy the generic |
2571 | * 3 ints plus the relevant union member. | 2572 | * 3 ints plus the relevant union member. |
2572 | */ | 2573 | */ |
2573 | err = __put_user(from->si_signo, &to->si_signo); | 2574 | err = __put_user(from->si_signo, &to->si_signo); |
2574 | err |= __put_user(from->si_errno, &to->si_errno); | 2575 | err |= __put_user(from->si_errno, &to->si_errno); |
2575 | err |= __put_user((short)from->si_code, &to->si_code); | 2576 | err |= __put_user((short)from->si_code, &to->si_code); |
2576 | switch (from->si_code & __SI_MASK) { | 2577 | switch (from->si_code & __SI_MASK) { |
2577 | case __SI_KILL: | 2578 | case __SI_KILL: |
2578 | err |= __put_user(from->si_pid, &to->si_pid); | 2579 | err |= __put_user(from->si_pid, &to->si_pid); |
2579 | err |= __put_user(from->si_uid, &to->si_uid); | 2580 | err |= __put_user(from->si_uid, &to->si_uid); |
2580 | break; | 2581 | break; |
2581 | case __SI_TIMER: | 2582 | case __SI_TIMER: |
2582 | err |= __put_user(from->si_tid, &to->si_tid); | 2583 | err |= __put_user(from->si_tid, &to->si_tid); |
2583 | err |= __put_user(from->si_overrun, &to->si_overrun); | 2584 | err |= __put_user(from->si_overrun, &to->si_overrun); |
2584 | err |= __put_user(from->si_ptr, &to->si_ptr); | 2585 | err |= __put_user(from->si_ptr, &to->si_ptr); |
2585 | break; | 2586 | break; |
2586 | case __SI_POLL: | 2587 | case __SI_POLL: |
2587 | err |= __put_user(from->si_band, &to->si_band); | 2588 | err |= __put_user(from->si_band, &to->si_band); |
2588 | err |= __put_user(from->si_fd, &to->si_fd); | 2589 | err |= __put_user(from->si_fd, &to->si_fd); |
2589 | break; | 2590 | break; |
2590 | case __SI_FAULT: | 2591 | case __SI_FAULT: |
2591 | err |= __put_user(from->si_addr, &to->si_addr); | 2592 | err |= __put_user(from->si_addr, &to->si_addr); |
2592 | #ifdef __ARCH_SI_TRAPNO | 2593 | #ifdef __ARCH_SI_TRAPNO |
2593 | err |= __put_user(from->si_trapno, &to->si_trapno); | 2594 | err |= __put_user(from->si_trapno, &to->si_trapno); |
2594 | #endif | 2595 | #endif |
2595 | #ifdef BUS_MCEERR_AO | 2596 | #ifdef BUS_MCEERR_AO |
2596 | /* | 2597 | /* |
2597 | * Other callers might not initialize the si_lsb field, | 2598 | * Other callers might not initialize the si_lsb field, |
2598 | * so check explicitly for the right codes here. | 2599 | * so check explicitly for the right codes here. |
2599 | */ | 2600 | */ |
2600 | if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) | 2601 | if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) |
2601 | err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); | 2602 | err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); |
2602 | #endif | 2603 | #endif |
2603 | break; | 2604 | break; |
2604 | case __SI_CHLD: | 2605 | case __SI_CHLD: |
2605 | err |= __put_user(from->si_pid, &to->si_pid); | 2606 | err |= __put_user(from->si_pid, &to->si_pid); |
2606 | err |= __put_user(from->si_uid, &to->si_uid); | 2607 | err |= __put_user(from->si_uid, &to->si_uid); |
2607 | err |= __put_user(from->si_status, &to->si_status); | 2608 | err |= __put_user(from->si_status, &to->si_status); |
2608 | err |= __put_user(from->si_utime, &to->si_utime); | 2609 | err |= __put_user(from->si_utime, &to->si_utime); |
2609 | err |= __put_user(from->si_stime, &to->si_stime); | 2610 | err |= __put_user(from->si_stime, &to->si_stime); |
2610 | break; | 2611 | break; |
2611 | case __SI_RT: /* This is not generated by the kernel as of now. */ | 2612 | case __SI_RT: /* This is not generated by the kernel as of now. */ |
2612 | case __SI_MESGQ: /* But this is */ | 2613 | case __SI_MESGQ: /* But this is */ |
2613 | err |= __put_user(from->si_pid, &to->si_pid); | 2614 | err |= __put_user(from->si_pid, &to->si_pid); |
2614 | err |= __put_user(from->si_uid, &to->si_uid); | 2615 | err |= __put_user(from->si_uid, &to->si_uid); |
2615 | err |= __put_user(from->si_ptr, &to->si_ptr); | 2616 | err |= __put_user(from->si_ptr, &to->si_ptr); |
2616 | break; | 2617 | break; |
2617 | default: /* this is just in case for now ... */ | 2618 | default: /* this is just in case for now ... */ |
2618 | err |= __put_user(from->si_pid, &to->si_pid); | 2619 | err |= __put_user(from->si_pid, &to->si_pid); |
2619 | err |= __put_user(from->si_uid, &to->si_uid); | 2620 | err |= __put_user(from->si_uid, &to->si_uid); |
2620 | break; | 2621 | break; |
2621 | } | 2622 | } |
2622 | return err; | 2623 | return err; |
2623 | } | 2624 | } |
2624 | 2625 | ||
2625 | #endif | 2626 | #endif |
2626 | 2627 | ||
2627 | /** | 2628 | /** |
2628 | * do_sigtimedwait - wait for queued signals specified in @which | 2629 | * do_sigtimedwait - wait for queued signals specified in @which |
2629 | * @which: queued signals to wait for | 2630 | * @which: queued signals to wait for |
2630 | * @info: if non-null, the signal's siginfo is returned here | 2631 | * @info: if non-null, the signal's siginfo is returned here |
2631 | * @ts: upper bound on process time suspension | 2632 | * @ts: upper bound on process time suspension |
2632 | */ | 2633 | */ |
2633 | int do_sigtimedwait(const sigset_t *which, siginfo_t *info, | 2634 | int do_sigtimedwait(const sigset_t *which, siginfo_t *info, |
2634 | const struct timespec *ts) | 2635 | const struct timespec *ts) |
2635 | { | 2636 | { |
2636 | struct task_struct *tsk = current; | 2637 | struct task_struct *tsk = current; |
2637 | long timeout = MAX_SCHEDULE_TIMEOUT; | 2638 | long timeout = MAX_SCHEDULE_TIMEOUT; |
2638 | sigset_t mask = *which; | 2639 | sigset_t mask = *which; |
2639 | int sig; | 2640 | int sig; |
2640 | 2641 | ||
2641 | if (ts) { | 2642 | if (ts) { |
2642 | if (!timespec_valid(ts)) | 2643 | if (!timespec_valid(ts)) |
2643 | return -EINVAL; | 2644 | return -EINVAL; |
2644 | timeout = timespec_to_jiffies(ts); | 2645 | timeout = timespec_to_jiffies(ts); |
2645 | /* | 2646 | /* |
2646 | * We can be close to the next tick, add another one | 2647 | * We can be close to the next tick, add another one |
2647 | * to ensure we will wait at least the time asked for. | 2648 | * to ensure we will wait at least the time asked for. |
2648 | */ | 2649 | */ |
2649 | if (ts->tv_sec || ts->tv_nsec) | 2650 | if (ts->tv_sec || ts->tv_nsec) |
2650 | timeout++; | 2651 | timeout++; |
2651 | } | 2652 | } |
2652 | 2653 | ||
2653 | /* | 2654 | /* |
2654 | * Invert the set of allowed signals to get those we want to block. | 2655 | * Invert the set of allowed signals to get those we want to block. |
2655 | */ | 2656 | */ |
2656 | sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); | 2657 | sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); |
2657 | signotset(&mask); | 2658 | signotset(&mask); |
2658 | 2659 | ||
2659 | spin_lock_irq(&tsk->sighand->siglock); | 2660 | spin_lock_irq(&tsk->sighand->siglock); |
2660 | sig = dequeue_signal(tsk, &mask, info); | 2661 | sig = dequeue_signal(tsk, &mask, info); |
2661 | if (!sig && timeout) { | 2662 | if (!sig && timeout) { |
2662 | /* | 2663 | /* |
2663 | * None ready, temporarily unblock those we're interested in | 2664 | * None ready, temporarily unblock those we're interested in |
2664 | * while we are sleeping so that we'll be awakened when | 2665 | * while we are sleeping so that we'll be awakened when |
2665 | * they arrive. Unblocking is always fine, we can avoid | 2666 | * they arrive. Unblocking is always fine, we can avoid |
2666 | * set_current_blocked(). | 2667 | * set_current_blocked(). |
2667 | */ | 2668 | */ |
2668 | tsk->real_blocked = tsk->blocked; | 2669 | tsk->real_blocked = tsk->blocked; |
2669 | sigandsets(&tsk->blocked, &tsk->blocked, &mask); | 2670 | sigandsets(&tsk->blocked, &tsk->blocked, &mask); |
2670 | recalc_sigpending(); | 2671 | recalc_sigpending(); |
2671 | spin_unlock_irq(&tsk->sighand->siglock); | 2672 | spin_unlock_irq(&tsk->sighand->siglock); |
2672 | 2673 | ||
2673 | timeout = schedule_timeout_interruptible(timeout); | 2674 | timeout = schedule_timeout_interruptible(timeout); |
2674 | 2675 | ||
2675 | spin_lock_irq(&tsk->sighand->siglock); | 2676 | spin_lock_irq(&tsk->sighand->siglock); |
2676 | __set_task_blocked(tsk, &tsk->real_blocked); | 2677 | __set_task_blocked(tsk, &tsk->real_blocked); |
2677 | siginitset(&tsk->real_blocked, 0); | 2678 | siginitset(&tsk->real_blocked, 0); |
2678 | sig = dequeue_signal(tsk, &mask, info); | 2679 | sig = dequeue_signal(tsk, &mask, info); |
2679 | } | 2680 | } |
2680 | spin_unlock_irq(&tsk->sighand->siglock); | 2681 | spin_unlock_irq(&tsk->sighand->siglock); |
2681 | 2682 | ||
2682 | if (sig) | 2683 | if (sig) |
2683 | return sig; | 2684 | return sig; |
2684 | return timeout ? -EINTR : -EAGAIN; | 2685 | return timeout ? -EINTR : -EAGAIN; |
2685 | } | 2686 | } |
2686 | 2687 | ||
2687 | /** | 2688 | /** |
2688 | * sys_rt_sigtimedwait - synchronously wait for queued signals specified | 2689 | * sys_rt_sigtimedwait - synchronously wait for queued signals specified |
2689 | * in @uthese | 2690 | * in @uthese |
2690 | * @uthese: queued signals to wait for | 2691 | * @uthese: queued signals to wait for |
2691 | * @uinfo: if non-null, the signal's siginfo is returned here | 2692 | * @uinfo: if non-null, the signal's siginfo is returned here |
2692 | * @uts: upper bound on process time suspension | 2693 | * @uts: upper bound on process time suspension |
2693 | * @sigsetsize: size of sigset_t type | 2694 | * @sigsetsize: size of sigset_t type |
2694 | */ | 2695 | */ |
2695 | SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, | 2696 | SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, |
2696 | siginfo_t __user *, uinfo, const struct timespec __user *, uts, | 2697 | siginfo_t __user *, uinfo, const struct timespec __user *, uts, |
2697 | size_t, sigsetsize) | 2698 | size_t, sigsetsize) |
2698 | { | 2699 | { |
2699 | sigset_t these; | 2700 | sigset_t these; |
2700 | struct timespec ts; | 2701 | struct timespec ts; |
2701 | siginfo_t info; | 2702 | siginfo_t info; |
2702 | int ret; | 2703 | int ret; |
2703 | 2704 | ||
2704 | /* XXX: Don't preclude handling different sized sigset_t's. */ | 2705 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2705 | if (sigsetsize != sizeof(sigset_t)) | 2706 | if (sigsetsize != sizeof(sigset_t)) |
2706 | return -EINVAL; | 2707 | return -EINVAL; |
2707 | 2708 | ||
2708 | if (copy_from_user(&these, uthese, sizeof(these))) | 2709 | if (copy_from_user(&these, uthese, sizeof(these))) |
2709 | return -EFAULT; | 2710 | return -EFAULT; |
2710 | 2711 | ||
2711 | if (uts) { | 2712 | if (uts) { |
2712 | if (copy_from_user(&ts, uts, sizeof(ts))) | 2713 | if (copy_from_user(&ts, uts, sizeof(ts))) |
2713 | return -EFAULT; | 2714 | return -EFAULT; |
2714 | } | 2715 | } |
2715 | 2716 | ||
2716 | ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); | 2717 | ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); |
2717 | 2718 | ||
2718 | if (ret > 0 && uinfo) { | 2719 | if (ret > 0 && uinfo) { |
2719 | if (copy_siginfo_to_user(uinfo, &info)) | 2720 | if (copy_siginfo_to_user(uinfo, &info)) |
2720 | ret = -EFAULT; | 2721 | ret = -EFAULT; |
2721 | } | 2722 | } |
2722 | 2723 | ||
2723 | return ret; | 2724 | return ret; |
2724 | } | 2725 | } |
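
Typical userspace usage of this syscall, via the sigtimedwait(2) wrapper: the signal is blocked first so it stays queued instead of being delivered, and a timeout surfaces as EAGAIN, mirroring the -EAGAIN return in do_sigtimedwait(). A minimal sketch:

#include <signal.h>
#include <time.h>
#include <errno.h>
#include <stdio.h>

static void wait_for_usr1(void)
{
        sigset_t set;
        siginfo_t info;
        struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);     /* keep it queued */

        if (sigtimedwait(&set, &info, &ts) < 0 && errno == EAGAIN)
                printf("no SIGUSR1 within 5 seconds\n");
}
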
2725 | 2726 | ||
2726 | /** | 2727 | /** |
2727 | * sys_kill - send a signal to a process | 2728 | * sys_kill - send a signal to a process |
2728 | * @pid: the PID of the process | 2729 | * @pid: the PID of the process |
2729 | * @sig: signal to be sent | 2730 | * @sig: signal to be sent |
2730 | */ | 2731 | */ |
2731 | SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) | 2732 | SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) |
2732 | { | 2733 | { |
2733 | struct siginfo info; | 2734 | struct siginfo info; |
2734 | 2735 | ||
2735 | info.si_signo = sig; | 2736 | info.si_signo = sig; |
2736 | info.si_errno = 0; | 2737 | info.si_errno = 0; |
2737 | info.si_code = SI_USER; | 2738 | info.si_code = SI_USER; |
2738 | info.si_pid = task_tgid_vnr(current); | 2739 | info.si_pid = task_tgid_vnr(current); |
2739 | info.si_uid = current_uid(); | 2740 | info.si_uid = current_uid(); |
2740 | 2741 | ||
2741 | return kill_something_info(sig, &info, pid); | 2742 | return kill_something_info(sig, &info, pid); |
2742 | } | 2743 | } |
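
kill(2) is the userspace entry point here. Signal 0 performs only the permission and existence checks; nothing is queued. process_exists() below is a hypothetical helper built on that:

#include <sys/types.h>
#include <signal.h>
#include <errno.h>

static int process_exists(pid_t pid)
{
        /* Success, or failure with EPERM, both prove the PID
         * exists; only ESRCH means it does not. */
        return kill(pid, 0) == 0 || errno == EPERM;
}
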
2743 | 2744 | ||
2744 | static int | 2745 | static int |
2745 | do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) | 2746 | do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) |
2746 | { | 2747 | { |
2747 | struct task_struct *p; | 2748 | struct task_struct *p; |
2748 | int error = -ESRCH; | 2749 | int error = -ESRCH; |
2749 | 2750 | ||
2750 | rcu_read_lock(); | 2751 | rcu_read_lock(); |
2751 | p = find_task_by_vpid(pid); | 2752 | p = find_task_by_vpid(pid); |
2752 | if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { | 2753 | if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { |
2753 | error = check_kill_permission(sig, info, p); | 2754 | error = check_kill_permission(sig, info, p); |
2754 | /* | 2755 | /* |
2755 | * The null signal is a permissions and process existence | 2756 | * The null signal is a permissions and process existence |
2756 | * probe. No signal is actually delivered. | 2757 | * probe. No signal is actually delivered. |
2757 | */ | 2758 | */ |
2758 | if (!error && sig) { | 2759 | if (!error && sig) { |
2759 | error = do_send_sig_info(sig, info, p, false); | 2760 | error = do_send_sig_info(sig, info, p, false); |
2760 | /* | 2761 | /* |
2761 | * If lock_task_sighand() failed we pretend the task | 2762 | * If lock_task_sighand() failed we pretend the task |
2762 | * dies after receiving the signal. The window is tiny, | 2763 | * dies after receiving the signal. The window is tiny, |
2763 | * and the signal is private anyway. | 2764 | * and the signal is private anyway. |
2764 | */ | 2765 | */ |
2765 | if (unlikely(error == -ESRCH)) | 2766 | if (unlikely(error == -ESRCH)) |
2766 | error = 0; | 2767 | error = 0; |
2767 | } | 2768 | } |
2768 | } | 2769 | } |
2769 | rcu_read_unlock(); | 2770 | rcu_read_unlock(); |
2770 | 2771 | ||
2771 | return error; | 2772 | return error; |
2772 | } | 2773 | } |
2773 | 2774 | ||
2774 | static int do_tkill(pid_t tgid, pid_t pid, int sig) | 2775 | static int do_tkill(pid_t tgid, pid_t pid, int sig) |
2775 | { | 2776 | { |
2776 | struct siginfo info; | 2777 | struct siginfo info; |
2777 | 2778 | ||
2778 | info.si_signo = sig; | 2779 | info.si_signo = sig; |
2779 | info.si_errno = 0; | 2780 | info.si_errno = 0; |
2780 | info.si_code = SI_TKILL; | 2781 | info.si_code = SI_TKILL; |
2781 | info.si_pid = task_tgid_vnr(current); | 2782 | info.si_pid = task_tgid_vnr(current); |
2782 | info.si_uid = current_uid(); | 2783 | info.si_uid = current_uid(); |
2783 | 2784 | ||
2784 | return do_send_specific(tgid, pid, sig, &info); | 2785 | return do_send_specific(tgid, pid, sig, &info); |
2785 | } | 2786 | } |
2786 | 2787 | ||
2787 | /** | 2788 | /** |
2788 | * sys_tgkill - send signal to one specific thread | 2789 | * sys_tgkill - send signal to one specific thread |
2789 | * @tgid: the thread group ID of the thread | 2790 | * @tgid: the thread group ID of the thread |
2790 | * @pid: the PID of the thread | 2791 | * @pid: the PID of the thread |
2791 | * @sig: signal to be sent | 2792 | * @sig: signal to be sent |
2792 | * | 2793 | * |
2793 | * This syscall also checks the @tgid and returns -ESRCH even if the PID | 2794 | * This syscall also checks the @tgid and returns -ESRCH even if the PID |
2794 | * exists but no longer belongs to the target process. This | 2795 | * exists but no longer belongs to the target process. This |
2795 | * method solves the problem of threads exiting and PIDs getting reused. | 2796 | * method solves the problem of threads exiting and PIDs getting reused. |
2796 | */ | 2797 | */ |
2797 | SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) | 2798 | SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) |
2798 | { | 2799 | { |
2799 | /* This is only valid for single tasks */ | 2800 | /* This is only valid for single tasks */ |
2800 | if (pid <= 0 || tgid <= 0) | 2801 | if (pid <= 0 || tgid <= 0) |
2801 | return -EINVAL; | 2802 | return -EINVAL; |
2802 | 2803 | ||
2803 | return do_tkill(tgid, pid, sig); | 2804 | return do_tkill(tgid, pid, sig); |
2804 | } | 2805 | } |
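glibc has not always shipped a tgkill() wrapper, so raw syscall(2) is the traditional route; thanks to the tgid cross-check, a stale tid that now belongs to another process gets -ESRCH instead of a misdirected signal. A hedged sketch, kept side-effect free by using the null signal:

/* Hedged sketch: raw tgkill(2) against our own thread group. */
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	pid_t tgid = getpid();
	pid_t tid  = syscall(SYS_gettid);

	if (syscall(SYS_tgkill, tgid, tid, 0) != 0)
		perror("tgkill");
	else
		printf("tid %d is alive in tgid %d\n", (int)tid, (int)tgid);
	return 0;
}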
2805 | 2806 | ||
2806 | /** | 2807 | /** |
2807 | * sys_tkill - send signal to one specific task | 2808 | * sys_tkill - send signal to one specific task |
2808 | * @pid: the PID of the task | 2809 | * @pid: the PID of the task |
2809 | * @sig: signal to be sent | 2810 | * @sig: signal to be sent |
2810 | * | 2811 | * |
2811 | * Send a signal to only one task, even if it's a CLONE_THREAD task. | 2812 | * Send a signal to only one task, even if it's a CLONE_THREAD task. |
2812 | */ | 2813 | */ |
2813 | SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) | 2814 | SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) |
2814 | { | 2815 | { |
2815 | /* This is only valid for single tasks */ | 2816 | /* This is only valid for single tasks */ |
2816 | if (pid <= 0) | 2817 | if (pid <= 0) |
2817 | return -EINVAL; | 2818 | return -EINVAL; |
2818 | 2819 | ||
2819 | return do_tkill(0, pid, sig); | 2820 | return do_tkill(0, pid, sig); |
2820 | } | 2821 | } |
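Because tkill skips the tgid cross-check it is racy against PID reuse; portable code reaches the same machinery through pthread_kill(3), which glibc implements on top of tgkill. A hedged sketch (SIGUSR1, the sleep, and the handler are illustrative choices):

/* Hedged sketch: pthread_kill() as the portable face of t(g)kill.
 * Build with -pthread. */
#include <pthread.h>
#include <signal.h>
#include <unistd.h>

static void handler(int sig)
{
	write(1, "thread signalled\n", 17);	/* async-signal-safe */
}

static void *worker(void *arg)
{
	pause();		/* park until the signal arrives */
	return NULL;
}

int main(void)
{
	pthread_t t;

	signal(SIGUSR1, handler);
	pthread_create(&t, NULL, worker, NULL);
	sleep(1);		/* crude: let the worker reach pause() */
	pthread_kill(t, SIGUSR1);
	pthread_join(t, NULL);
	return 0;
}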
2821 | 2822 | ||
2822 | /** | 2823 | /** |
2823 | * sys_rt_sigqueueinfo - send signal information to a process | 2824 | * sys_rt_sigqueueinfo - send signal information to a process |
2824 | * @pid: the PID of the thread | 2825 | * @pid: the PID of the thread |
2825 | * @sig: signal to be sent | 2826 | * @sig: signal to be sent |
2826 | * @uinfo: signal info to be sent | 2827 | * @uinfo: signal info to be sent |
2827 | */ | 2828 | */ |
2828 | SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, | 2829 | SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, |
2829 | siginfo_t __user *, uinfo) | 2830 | siginfo_t __user *, uinfo) |
2830 | { | 2831 | { |
2831 | siginfo_t info; | 2832 | siginfo_t info; |
2832 | 2833 | ||
2833 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) | 2834 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) |
2834 | return -EFAULT; | 2835 | return -EFAULT; |
2835 | 2836 | ||
2836 | /* Not even root can pretend to send signals from the kernel. | 2837 | /* Not even root can pretend to send signals from the kernel. |
2837 | * Nor can they impersonate a kill()/tgkill(), which adds source info. | 2838 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
2838 | */ | 2839 | */ |
2839 | if (info.si_code >= 0 || info.si_code == SI_TKILL) { | 2840 | if (info.si_code >= 0 || info.si_code == SI_TKILL) { |
2840 | /* We used to allow any < 0 si_code */ | 2841 | /* We used to allow any < 0 si_code */ |
2841 | WARN_ON_ONCE(info.si_code < 0); | 2842 | WARN_ON_ONCE(info.si_code < 0); |
2842 | return -EPERM; | 2843 | return -EPERM; |
2843 | } | 2844 | } |
2844 | info.si_signo = sig; | 2845 | info.si_signo = sig; |
2845 | 2846 | ||
2846 | /* POSIX.1b doesn't mention process groups. */ | 2847 | /* POSIX.1b doesn't mention process groups. */ |
2847 | return kill_proc_info(sig, &info, pid); | 2848 | return kill_proc_info(sig, &info, pid); |
2848 | } | 2849 | } |
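The si_code test above is what keeps userspace from forging SI_USER or SI_TKILL origins; the sanctioned entry point is sigqueue(3), whose glibc wrapper fills in a negative si_code (SI_QUEUE) plus the payload before making this syscall. A hedged sketch (SIGRTMIN and the value 42 are arbitrary):

/* Hedged sketch: sigqueue(3) carrying an integer payload to
 * ourselves; glibc builds the siginfo and calls rt_sigqueueinfo. */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *info, void *ctx)
{
	/* printf is not async-signal-safe; demo only */
	printf("sig %d, si_code %d, value %d\n",
	       sig, info->si_code, info->si_value.sival_int);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = handler,
				.sa_flags = SA_SIGINFO };
	union sigval v = { .sival_int = 42 };

	sigemptyset(&sa.sa_mask);
	sigaction(SIGRTMIN, &sa, NULL);
	sigqueue(getpid(), SIGRTMIN, v);
	return 0;
}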
2849 | 2850 | ||
2850 | long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) | 2851 | long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) |
2851 | { | 2852 | { |
2852 | /* This is only valid for single tasks */ | 2853 | /* This is only valid for single tasks */ |
2853 | if (pid <= 0 || tgid <= 0) | 2854 | if (pid <= 0 || tgid <= 0) |
2854 | return -EINVAL; | 2855 | return -EINVAL; |
2855 | 2856 | ||
2856 | /* Not even root can pretend to send signals from the kernel. | 2857 | /* Not even root can pretend to send signals from the kernel. |
2857 | * Nor can they impersonate a kill()/tgkill(), which adds source info. | 2858 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
2858 | */ | 2859 | */ |
2859 | if (info->si_code >= 0 || info->si_code == SI_TKILL) { | 2860 | if (info->si_code >= 0 || info->si_code == SI_TKILL) { |
2860 | /* We used to allow any < 0 si_code */ | 2861 | /* We used to allow any < 0 si_code */ |
2861 | WARN_ON_ONCE(info->si_code < 0); | 2862 | WARN_ON_ONCE(info->si_code < 0); |
2862 | return -EPERM; | 2863 | return -EPERM; |
2863 | } | 2864 | } |
2864 | info->si_signo = sig; | 2865 | info->si_signo = sig; |
2865 | 2866 | ||
2866 | return do_send_specific(tgid, pid, sig, info); | 2867 | return do_send_specific(tgid, pid, sig, info); |
2867 | } | 2868 | } |
2868 | 2869 | ||
2869 | SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, | 2870 | SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, |
2870 | siginfo_t __user *, uinfo) | 2871 | siginfo_t __user *, uinfo) |
2871 | { | 2872 | { |
2872 | siginfo_t info; | 2873 | siginfo_t info; |
2873 | 2874 | ||
2874 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) | 2875 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) |
2875 | return -EFAULT; | 2876 | return -EFAULT; |
2876 | 2877 | ||
2877 | return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); | 2878 | return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); |
2878 | } | 2879 | } |
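This is the thread-directed sibling of rt_sigqueueinfo; glibc exposes it as the nonstandard pthread_sigqueue(3) (glibc >= 2.11, _GNU_SOURCE). A hedged sketch pairing it with sigwaitinfo() so the payload can be read synchronously:

/* Hedged sketch: pthread_sigqueue(3), implemented via
 * rt_tgsigqueueinfo. Build with -pthread. */
#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>

static void *worker(void *arg)
{
	sigset_t set;
	siginfo_t info;

	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigwaitinfo(&set, &info);	/* payload arrives in info */
	return (void *)(long)info.si_value.sival_int;
}

int main(void)
{
	pthread_t t;
	sigset_t set;
	union sigval v = { .sival_int = 7 };

	sigemptyset(&set);		/* block SIGRTMIN so the */
	sigaddset(&set, SIGRTMIN);	/* worker can sigwaitinfo() it */
	pthread_sigmask(SIG_BLOCK, &set, NULL);

	pthread_create(&t, NULL, worker, NULL);
	pthread_sigqueue(t, SIGRTMIN, v);
	pthread_join(t, NULL);
	return 0;
}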
2879 | 2880 | ||
2880 | int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) | 2881 | int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) |
2881 | { | 2882 | { |
2882 | struct task_struct *t = current; | 2883 | struct task_struct *t = current; |
2883 | struct k_sigaction *k; | 2884 | struct k_sigaction *k; |
2884 | sigset_t mask; | 2885 | sigset_t mask; |
2885 | 2886 | ||
2886 | if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) | 2887 | if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) |
2887 | return -EINVAL; | 2888 | return -EINVAL; |
2888 | 2889 | ||
2889 | k = &t->sighand->action[sig-1]; | 2890 | k = &t->sighand->action[sig-1]; |
2890 | 2891 | ||
2891 | spin_lock_irq(¤t->sighand->siglock); | 2892 | spin_lock_irq(¤t->sighand->siglock); |
2892 | if (oact) | 2893 | if (oact) |
2893 | *oact = *k; | 2894 | *oact = *k; |
2894 | 2895 | ||
2895 | if (act) { | 2896 | if (act) { |
2896 | sigdelsetmask(&act->sa.sa_mask, | 2897 | sigdelsetmask(&act->sa.sa_mask, |
2897 | sigmask(SIGKILL) | sigmask(SIGSTOP)); | 2898 | sigmask(SIGKILL) | sigmask(SIGSTOP)); |
2898 | *k = *act; | 2899 | *k = *act; |
2899 | /* | 2900 | /* |
2900 | * POSIX 3.3.1.3: | 2901 | * POSIX 3.3.1.3: |
2901 | * "Setting a signal action to SIG_IGN for a signal that is | 2902 | * "Setting a signal action to SIG_IGN for a signal that is |
2902 | * pending shall cause the pending signal to be discarded, | 2903 | * pending shall cause the pending signal to be discarded, |
2903 | * whether or not it is blocked." | 2904 | * whether or not it is blocked." |
2904 | * | 2905 | * |
2905 | * "Setting a signal action to SIG_DFL for a signal that is | 2906 | * "Setting a signal action to SIG_DFL for a signal that is |
2906 | * pending and whose default action is to ignore the signal | 2907 | * pending and whose default action is to ignore the signal |
2907 | * (for example, SIGCHLD), shall cause the pending signal to | 2908 | * (for example, SIGCHLD), shall cause the pending signal to |
2908 | * be discarded, whether or not it is blocked" | 2909 | * be discarded, whether or not it is blocked" |
2909 | */ | 2910 | */ |
2910 | if (sig_handler_ignored(sig_handler(t, sig), sig)) { | 2911 | if (sig_handler_ignored(sig_handler(t, sig), sig)) { |
2911 | sigemptyset(&mask); | 2912 | sigemptyset(&mask); |
2912 | sigaddset(&mask, sig); | 2913 | sigaddset(&mask, sig); |
2913 | rm_from_queue_full(&mask, &t->signal->shared_pending); | 2914 | rm_from_queue_full(&mask, &t->signal->shared_pending); |
2914 | do { | 2915 | do { |
2915 | rm_from_queue_full(&mask, &t->pending); | 2916 | rm_from_queue_full(&mask, &t->pending); |
2916 | t = next_thread(t); | 2917 | t = next_thread(t); |
2917 | } while (t != current); | 2918 | } while (t != current); |
2918 | } | 2919 | } |
2919 | } | 2920 | } |
2920 | 2921 | ||
2921 | spin_unlock_irq(¤t->sighand->siglock); | 2922 | spin_unlock_irq(¤t->sighand->siglock); |
2922 | return 0; | 2923 | return 0; |
2923 | } | 2924 | } |
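The POSIX rule quoted above is directly observable from userspace: a blocked, pending signal evaporates the moment its disposition becomes SIG_IGN. A hedged sketch (SIGUSR1 is an arbitrary choice):

/* Hedged sketch: watching do_sigaction() flush a pending signal
 * when the disposition changes to SIG_IGN. Prints 1 then 0. */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pend;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGUSR1);			/* now pending */
	sigpending(&pend);
	printf("pending before SIG_IGN: %d\n", sigismember(&pend, SIGUSR1));

	signal(SIGUSR1, SIG_IGN);	/* pending signal is discarded */
	sigpending(&pend);
	printf("pending after  SIG_IGN: %d\n", sigismember(&pend, SIGUSR1));
	return 0;
}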
2924 | 2925 | ||
2925 | int | 2926 | int |
2926 | do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp) | 2927 | do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp) |
2927 | { | 2928 | { |
2928 | stack_t oss; | 2929 | stack_t oss; |
2929 | int error; | 2930 | int error; |
2930 | 2931 | ||
2931 | oss.ss_sp = (void __user *) current->sas_ss_sp; | 2932 | oss.ss_sp = (void __user *) current->sas_ss_sp; |
2932 | oss.ss_size = current->sas_ss_size; | 2933 | oss.ss_size = current->sas_ss_size; |
2933 | oss.ss_flags = sas_ss_flags(sp); | 2934 | oss.ss_flags = sas_ss_flags(sp); |
2934 | 2935 | ||
2935 | if (uss) { | 2936 | if (uss) { |
2936 | void __user *ss_sp; | 2937 | void __user *ss_sp; |
2937 | size_t ss_size; | 2938 | size_t ss_size; |
2938 | int ss_flags; | 2939 | int ss_flags; |
2939 | 2940 | ||
2940 | error = -EFAULT; | 2941 | error = -EFAULT; |
2941 | if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) | 2942 | if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) |
2942 | goto out; | 2943 | goto out; |
2943 | error = __get_user(ss_sp, &uss->ss_sp) | | 2944 | error = __get_user(ss_sp, &uss->ss_sp) | |
2944 | __get_user(ss_flags, &uss->ss_flags) | | 2945 | __get_user(ss_flags, &uss->ss_flags) | |
2945 | __get_user(ss_size, &uss->ss_size); | 2946 | __get_user(ss_size, &uss->ss_size); |
2946 | if (error) | 2947 | if (error) |
2947 | goto out; | 2948 | goto out; |
2948 | 2949 | ||
2949 | error = -EPERM; | 2950 | error = -EPERM; |
2950 | if (on_sig_stack(sp)) | 2951 | if (on_sig_stack(sp)) |
2951 | goto out; | 2952 | goto out; |
2952 | 2953 | ||
2953 | error = -EINVAL; | 2954 | error = -EINVAL; |
2954 | /* | 2955 | /* |
2955 | * Note - this code used to test ss_flags incorrectly: | 2956 | * Note - this code used to test ss_flags incorrectly: |
2956 | * old code may have been written using ss_flags==0 | 2957 | * old code may have been written using ss_flags==0 |
2957 | * to mean ss_flags==SS_ONSTACK (as this was the only | 2958 | * to mean ss_flags==SS_ONSTACK (as this was the only |
2958 | * way that worked) - this fix preserves that older | 2959 | * way that worked) - this fix preserves that older |
2959 | * mechanism. | 2960 | * mechanism. |
2960 | */ | 2961 | */ |
2961 | if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) | 2962 | if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) |
2962 | goto out; | 2963 | goto out; |
2963 | 2964 | ||
2964 | if (ss_flags == SS_DISABLE) { | 2965 | if (ss_flags == SS_DISABLE) { |
2965 | ss_size = 0; | 2966 | ss_size = 0; |
2966 | ss_sp = NULL; | 2967 | ss_sp = NULL; |
2967 | } else { | 2968 | } else { |
2968 | error = -ENOMEM; | 2969 | error = -ENOMEM; |
2969 | if (ss_size < MINSIGSTKSZ) | 2970 | if (ss_size < MINSIGSTKSZ) |
2970 | goto out; | 2971 | goto out; |
2971 | } | 2972 | } |
2972 | 2973 | ||
2973 | current->sas_ss_sp = (unsigned long) ss_sp; | 2974 | current->sas_ss_sp = (unsigned long) ss_sp; |
2974 | current->sas_ss_size = ss_size; | 2975 | current->sas_ss_size = ss_size; |
2975 | } | 2976 | } |
2976 | 2977 | ||
2977 | error = 0; | 2978 | error = 0; |
2978 | if (uoss) { | 2979 | if (uoss) { |
2979 | error = -EFAULT; | 2980 | error = -EFAULT; |
2980 | if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) | 2981 | if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) |
2981 | goto out; | 2982 | goto out; |
2982 | error = __put_user(oss.ss_sp, &uoss->ss_sp) | | 2983 | error = __put_user(oss.ss_sp, &uoss->ss_sp) | |
2983 | __put_user(oss.ss_size, &uoss->ss_size) | | 2984 | __put_user(oss.ss_size, &uoss->ss_size) | |
2984 | __put_user(oss.ss_flags, &uoss->ss_flags); | 2985 | __put_user(oss.ss_flags, &uoss->ss_flags); |
2985 | } | 2986 | } |
2986 | 2987 | ||
2987 | out: | 2988 | out: |
2988 | return error; | 2989 | return error; |
2989 | } | 2990 | } |
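The usual consumer of this interface is a SIGSEGV handler that must run after the main stack has become unusable (e.g. on stack overflow): userspace registers an alternate stack here and marks the handler SA_ONSTACK. A hedged sketch, faulting deliberately to show the handler surviving:

/* Hedged sketch: sigaltstack(2) plus an SA_ONSTACK handler. */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void on_segv(int sig)
{
	write(2, "handled SIGSEGV on the alt stack\n", 33);
	_exit(1);
}

int main(void)
{
	stack_t ss = {
		.ss_sp    = malloc(SIGSTKSZ),
		.ss_size  = SIGSTKSZ,	/* must be >= MINSIGSTKSZ */
		.ss_flags = 0,
	};
	struct sigaction sa = {
		.sa_handler = on_segv,
		.sa_flags   = SA_ONSTACK,
	};

	if (sigaltstack(&ss, NULL) != 0)
		perror("sigaltstack");
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)0 = 0;		/* fault on purpose */
	return 0;
}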
2990 | 2991 | ||
2991 | #ifdef __ARCH_WANT_SYS_SIGPENDING | 2992 | #ifdef __ARCH_WANT_SYS_SIGPENDING |
2992 | 2993 | ||
2993 | /** | 2994 | /** |
2994 | * sys_sigpending - examine pending signals | 2995 | * sys_sigpending - examine pending signals |
2995 | * @set: where the mask of pending signals is returned | 2996 | * @set: where the mask of pending signals is returned |
2996 | */ | 2997 | */ |
2997 | SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) | 2998 | SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) |
2998 | { | 2999 | { |
2999 | return do_sigpending(set, sizeof(*set)); | 3000 | return do_sigpending(set, sizeof(*set)); |
3000 | } | 3001 | } |
3001 | 3002 | ||
3002 | #endif | 3003 | #endif |
3003 | 3004 | ||
3004 | #ifdef __ARCH_WANT_SYS_SIGPROCMASK | 3005 | #ifdef __ARCH_WANT_SYS_SIGPROCMASK |
3005 | /** | 3006 | /** |
3006 | * sys_sigprocmask - examine and change blocked signals | 3007 | * sys_sigprocmask - examine and change blocked signals |
3007 | * @how: whether to add, remove, or set signals | 3008 | * @how: whether to add, remove, or set signals |
3008 | * @nset: signals to add or remove (if non-null) | 3009 | * @nset: signals to add or remove (if non-null) |
3009 | * @oset: previous value of signal mask if non-null | 3010 | * @oset: previous value of signal mask if non-null |
3010 | * | 3011 | * |
3011 | * Some platforms have their own version with special arguments; | 3012 | * Some platforms have their own version with special arguments; |
3012 | * others support only sys_rt_sigprocmask. | 3013 | * others support only sys_rt_sigprocmask. |
3013 | */ | 3014 | */ |
3014 | 3015 | ||
3015 | SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset, | 3016 | SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset, |
3016 | old_sigset_t __user *, oset) | 3017 | old_sigset_t __user *, oset) |
3017 | { | 3018 | { |
3018 | old_sigset_t old_set, new_set; | 3019 | old_sigset_t old_set, new_set; |
3019 | sigset_t new_blocked; | 3020 | sigset_t new_blocked; |
3020 | 3021 | ||
3021 | old_set = current->blocked.sig[0]; | 3022 | old_set = current->blocked.sig[0]; |
3022 | 3023 | ||
3023 | if (nset) { | 3024 | if (nset) { |
3024 | if (copy_from_user(&new_set, nset, sizeof(*nset))) | 3025 | if (copy_from_user(&new_set, nset, sizeof(*nset))) |
3025 | return -EFAULT; | 3026 | return -EFAULT; |
3026 | new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)); | 3027 | new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)); |
3027 | 3028 | ||
3028 | new_blocked = current->blocked; | 3029 | new_blocked = current->blocked; |
3029 | 3030 | ||
3030 | switch (how) { | 3031 | switch (how) { |
3031 | case SIG_BLOCK: | 3032 | case SIG_BLOCK: |
3032 | sigaddsetmask(&new_blocked, new_set); | 3033 | sigaddsetmask(&new_blocked, new_set); |
3033 | break; | 3034 | break; |
3034 | case SIG_UNBLOCK: | 3035 | case SIG_UNBLOCK: |
3035 | sigdelsetmask(&new_blocked, new_set); | 3036 | sigdelsetmask(&new_blocked, new_set); |
3036 | break; | 3037 | break; |
3037 | case SIG_SETMASK: | 3038 | case SIG_SETMASK: |
3038 | new_blocked.sig[0] = new_set; | 3039 | new_blocked.sig[0] = new_set; |
3039 | break; | 3040 | break; |
3040 | default: | 3041 | default: |
3041 | return -EINVAL; | 3042 | return -EINVAL; |
3042 | } | 3043 | } |
3043 | 3044 | ||
3044 | set_current_blocked(&new_blocked); | 3045 | set_current_blocked(&new_blocked); |
3045 | } | 3046 | } |
3046 | 3047 | ||
3047 | if (oset) { | 3048 | if (oset) { |
3048 | if (copy_to_user(oset, &old_set, sizeof(*oset))) | 3049 | if (copy_to_user(oset, &old_set, sizeof(*oset))) |
3049 | return -EFAULT; | 3050 | return -EFAULT; |
3050 | } | 3051 | } |
3051 | 3052 | ||
3052 | return 0; | 3053 | return 0; |
3053 | } | 3054 | } |
3054 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ | 3055 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ |
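Note that this compatibility call only ever touches word 0 of the blocked set; the glibc sigprocmask() wrapper goes through rt_sigprocmask on modern kernels. A hedged sketch of the block/restore pattern it implements:

/* Hedged sketch: masking SIGINT around a critical section. */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);

	sigprocmask(SIG_BLOCK, &set, &old);	/* ^C now stays pending */
	/* ... critical section ... */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* restore previous mask */

	puts("done");
	return 0;
}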
3055 | 3056 | ||
3056 | #ifdef __ARCH_WANT_SYS_RT_SIGACTION | 3057 | #ifdef __ARCH_WANT_SYS_RT_SIGACTION |
3057 | /** | 3058 | /** |
3058 | * sys_rt_sigaction - alter an action taken by a process | 3059 | * sys_rt_sigaction - alter an action taken by a process |
3059 | * @sig: signal to be sent | 3060 | * @sig: signal to be sent |
3060 | * @act: new sigaction | 3061 | * @act: new sigaction |
3061 | * @oact: used to save the previous sigaction | 3062 | * @oact: used to save the previous sigaction |
3062 | * @sigsetsize: size of sigset_t type | 3063 | * @sigsetsize: size of sigset_t type |
3063 | */ | 3064 | */ |
3064 | SYSCALL_DEFINE4(rt_sigaction, int, sig, | 3065 | SYSCALL_DEFINE4(rt_sigaction, int, sig, |
3065 | const struct sigaction __user *, act, | 3066 | const struct sigaction __user *, act, |
3066 | struct sigaction __user *, oact, | 3067 | struct sigaction __user *, oact, |
3067 | size_t, sigsetsize) | 3068 | size_t, sigsetsize) |
3068 | { | 3069 | { |
3069 | struct k_sigaction new_sa, old_sa; | 3070 | struct k_sigaction new_sa, old_sa; |
3070 | int ret = -EINVAL; | 3071 | int ret = -EINVAL; |
3071 | 3072 | ||
3072 | /* XXX: Don't preclude handling different sized sigset_t's. */ | 3073 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
3073 | if (sigsetsize != sizeof(sigset_t)) | 3074 | if (sigsetsize != sizeof(sigset_t)) |
3074 | goto out; | 3075 | goto out; |
3075 | 3076 | ||
3076 | if (act) { | 3077 | if (act) { |
3077 | if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa))) | 3078 | if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa))) |
3078 | return -EFAULT; | 3079 | return -EFAULT; |
3079 | } | 3080 | } |
3080 | 3081 | ||
3081 | ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); | 3082 | ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); |
3082 | 3083 | ||
3083 | if (!ret && oact) { | 3084 | if (!ret && oact) { |
3084 | if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa))) | 3085 | if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa))) |
3085 | return -EFAULT; | 3086 | return -EFAULT; |
3086 | } | 3087 | } |
3087 | out: | 3088 | out: |
3088 | return ret; | 3089 | return ret; |
3089 | } | 3090 | } |
3090 | #endif /* __ARCH_WANT_SYS_RT_SIGACTION */ | 3091 | #endif /* __ARCH_WANT_SYS_RT_SIGACTION */ |
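glibc's sigaction() is a thin shim over this syscall, always passing sigsetsize == sizeof(sigset_t) to satisfy the check above. A hedged sketch mirroring the act/oact pair:

/* Hedged sketch: install a SIGINT handler, then restore the old
 * disposition, just as the act/oact arguments allow. */
#include <signal.h>
#include <unistd.h>

static void on_int(int sig)
{
	write(1, "caught SIGINT\n", 14);	/* async-signal-safe */
}

int main(void)
{
	struct sigaction sa = { .sa_handler = on_int }, old;

	sigemptyset(&sa.sa_mask);
	sigaction(SIGINT, &sa, &old);	/* install, remember the old one */
	raise(SIGINT);

	sigaction(SIGINT, &old, NULL);	/* put the original back */
	return 0;
}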
3091 | 3092 | ||
3092 | #ifdef __ARCH_WANT_SYS_SGETMASK | 3093 | #ifdef __ARCH_WANT_SYS_SGETMASK |
3093 | 3094 | ||
3094 | /* | 3095 | /* |
3095 | * For backwards compatibility. Functionality superseded by sigprocmask. | 3096 | * For backwards compatibility. Functionality superseded by sigprocmask. |
3096 | */ | 3097 | */ |
3097 | SYSCALL_DEFINE0(sgetmask) | 3098 | SYSCALL_DEFINE0(sgetmask) |
3098 | { | 3099 | { |
3099 | /* SMP safe */ | 3100 | /* SMP safe */ |
3100 | return current->blocked.sig[0]; | 3101 | return current->blocked.sig[0]; |
3101 | } | 3102 | } |
3102 | 3103 | ||
3103 | SYSCALL_DEFINE1(ssetmask, int, newmask) | 3104 | SYSCALL_DEFINE1(ssetmask, int, newmask) |
3104 | { | 3105 | { |
3105 | int old; | 3106 | int old; |
3106 | 3107 | ||
3107 | spin_lock_irq(¤t->sighand->siglock); | 3108 | spin_lock_irq(¤t->sighand->siglock); |
3108 | old = current->blocked.sig[0]; | 3109 | old = current->blocked.sig[0]; |
3109 | 3110 | ||
3110 | siginitset(¤t->blocked, newmask & ~(sigmask(SIGKILL)| | 3111 | siginitset(¤t->blocked, newmask & ~(sigmask(SIGKILL)| |
3111 | sigmask(SIGSTOP))); | 3112 | sigmask(SIGSTOP))); |
3112 | recalc_sigpending(); | 3113 | recalc_sigpending(); |
3113 | spin_unlock_irq(¤t->sighand->siglock); | 3114 | spin_unlock_irq(¤t->sighand->siglock); |
3114 | 3115 | ||
3115 | return old; | 3116 | return old; |
3116 | } | 3117 | } |
3117 | #endif /* __ARCH_WANT_SYS_SGETMASK */ | 3118 | #endif /* __ARCH_WANT_SYS_SGETMASK */ |
3118 | 3119 | ||
3119 | #ifdef __ARCH_WANT_SYS_SIGNAL | 3120 | #ifdef __ARCH_WANT_SYS_SIGNAL |
3120 | /* | 3121 | /* |
3121 | * For backwards compatibility. Functionality superseded by sigaction. | 3122 | * For backwards compatibility. Functionality superseded by sigaction. |
3122 | */ | 3123 | */ |
3123 | SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) | 3124 | SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) |
3124 | { | 3125 | { |
3125 | struct k_sigaction new_sa, old_sa; | 3126 | struct k_sigaction new_sa, old_sa; |
3126 | int ret; | 3127 | int ret; |
3127 | 3128 | ||
3128 | new_sa.sa.sa_handler = handler; | 3129 | new_sa.sa.sa_handler = handler; |
3129 | new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; | 3130 | new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; |
3130 | sigemptyset(&new_sa.sa.sa_mask); | 3131 | sigemptyset(&new_sa.sa.sa_mask); |
3131 | 3132 | ||
3132 | ret = do_sigaction(sig, &new_sa, &old_sa); | 3133 | ret = do_sigaction(sig, &new_sa, &old_sa); |
3133 | 3134 | ||
3134 | return ret ? ret : (unsigned long)old_sa.sa.sa_handler; | 3135 | return ret ? ret : (unsigned long)old_sa.sa.sa_handler; |
3135 | } | 3136 | } |
3136 | #endif /* __ARCH_WANT_SYS_SIGNAL */ | 3137 | #endif /* __ARCH_WANT_SYS_SIGNAL */ |
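SA_ONESHOT | SA_NOMASK is what gives signal(2) its unreliable SysV semantics: the disposition resets to SIG_DFL on delivery and the signal is not blocked inside its own handler. A hedged sketch of the classic workaround; note that glibc's signal() actually provides BSD semantics, so sysv_signal(3) (_GNU_SOURCE) is used to reproduce the behaviour implemented here:

/* Hedged sketch: re-installing a one-shot handler from inside
 * itself, the traditional answer to SA_ONESHOT semantics. */
#define _GNU_SOURCE
#include <signal.h>
#include <unistd.h>

static void on_usr1(int sig)
{
	sysv_signal(SIGUSR1, on_usr1);	/* disposition reset to SIG_DFL */
	write(1, "caught\n", 7);
}

int main(void)
{
	sysv_signal(SIGUSR1, on_usr1);
	raise(SIGUSR1);
	raise(SIGUSR1);		/* still caught, thanks to the re-install */
	return 0;
}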
3137 | 3138 | ||
3138 | #ifdef __ARCH_WANT_SYS_PAUSE | 3139 | #ifdef __ARCH_WANT_SYS_PAUSE |
3139 | 3140 | ||
3140 | SYSCALL_DEFINE0(pause) | 3141 | SYSCALL_DEFINE0(pause) |
3141 | { | 3142 | { |
3142 | while (!signal_pending(current)) { | 3143 | while (!signal_pending(current)) { |
3143 | current->state = TASK_INTERRUPTIBLE; | 3144 | current->state = TASK_INTERRUPTIBLE; |
3144 | schedule(); | 3145 | schedule(); |
3145 | } | 3146 | } |
3146 | return -ERESTARTNOHAND; | 3147 | return -ERESTARTNOHAND; |
3147 | } | 3148 | } |
3148 | 3149 | ||
3149 | #endif | 3150 | #endif |
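The -ERESTARTNOHAND return matters: once a handler has run, the signal code converts it to -EINTR instead of restarting the call, so pause() never restarts transparently. A hedged sketch (SIGALRM is an arbitrary wakeup source):

/* Hedged sketch: pause(2) returning -1/EINTR after a handler ran. */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_alrm(int sig) { /* only needed to interrupt pause() */ }

int main(void)
{
	signal(SIGALRM, on_alrm);
	alarm(1);			/* SIGALRM in one second */

	if (pause() == -1 && errno == EINTR)
		puts("pause() interrupted, as specified");
	return 0;
}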
3150 | 3151 | ||
3151 | #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND | 3152 | #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND |
3152 | /** | 3153 | /** |
3153 | * sys_rt_sigsuspend - replace the signal mask with the @unewset | 3154 | * sys_rt_sigsuspend - replace the signal mask with the @unewset |
3154 | * value until a signal is received | 3155 | * value until a signal is received |
3155 | * @unewset: new signal mask value | 3156 | * @unewset: new signal mask value |
3156 | * @sigsetsize: size of sigset_t type | 3157 | * @sigsetsize: size of sigset_t type |
3157 | */ | 3158 | */ |
3158 | SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) | 3159 | SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) |
3159 | { | 3160 | { |
3160 | sigset_t newset; | 3161 | sigset_t newset; |
3161 | 3162 | ||
3162 | /* XXX: Don't preclude handling different sized sigset_t's. */ | 3163 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
3163 | if (sigsetsize != sizeof(sigset_t)) | 3164 | if (sigsetsize != sizeof(sigset_t)) |
3164 | return -EINVAL; | 3165 | return -EINVAL; |
3165 | 3166 | ||
3166 | if (copy_from_user(&newset, unewset, sizeof(newset))) | 3167 | if (copy_from_user(&newset, unewset, sizeof(newset))) |
3167 | return -EFAULT; | 3168 | return -EFAULT; |
3168 | sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP)); | 3169 | sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
3169 | 3170 | ||
3170 | spin_lock_irq(¤t->sighand->siglock); | 3171 | spin_lock_irq(¤t->sighand->siglock); |
3171 | current->saved_sigmask = current->blocked; | 3172 | current->saved_sigmask = current->blocked; |
3172 | current->blocked = newset; | 3173 | current->blocked = newset; |
3173 | recalc_sigpending(); | 3174 | recalc_sigpending(); |
3174 | spin_unlock_irq(¤t->sighand->siglock); | 3175 | spin_unlock_irq(¤t->sighand->siglock); |
3175 | 3176 | ||
3176 | current->state = TASK_INTERRUPTIBLE; | 3177 | current->state = TASK_INTERRUPTIBLE; |
3177 | schedule(); | 3178 | schedule(); |
3178 | set_restore_sigmask(); | 3179 | set_restore_sigmask(); |
3179 | return -ERESTARTNOHAND; | 3180 | return -ERESTARTNOHAND; |
3180 | } | 3181 | } |
3181 | #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */ | 3182 | #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */ |
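The saved_sigmask / set_restore_sigmask() pair exists so the caller's original mask is only restored after any handler has run under the temporary one, which is precisely what makes the classic race-free wait idiom work. A hedged sketch of that idiom (SIGUSR1 and the self-raise are illustrative):

/* Hedged sketch: the unblock-and-wait idiom sigsuspend(2) enables.
 * SIGUSR1 stays blocked except inside the atomic wait itself. */
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got;

static void on_usr1(int sig) { got = 1; }

int main(void)
{
	sigset_t block, wait_mask;
	struct sigaction sa = { .sa_handler = on_usr1 };

	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &wait_mask);	/* close the race */

	raise(SIGUSR1);		/* pends until sigsuspend unblocks it */

	while (!got)
		sigsuspend(&wait_mask);	/* atomically unblock + sleep */

	puts("woken exactly once, no lost wakeup");
	return 0;
}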
3182 | 3183 | ||
3183 | __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma) | 3184 | __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma) |
3184 | { | 3185 | { |
3185 | return NULL; | 3186 | return NULL; |
3186 | } | 3187 | } |
3187 | 3188 | ||
3188 | void __init signals_init(void) | 3189 | void __init signals_init(void) |
3189 | { | 3190 | { |
3190 | sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); | 3191 | sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); |
3191 | } | 3192 | } |
3192 | 3193 | ||
3193 | #ifdef CONFIG_KGDB_KDB | 3194 | #ifdef CONFIG_KGDB_KDB |
3194 | #include <linux/kdb.h> | 3195 | #include <linux/kdb.h> |
3195 | /* | 3196 | /* |
3196 | * kdb_send_sig_info - Allows kdb to send signals without exposing | 3197 | * kdb_send_sig_info - Allows kdb to send signals without exposing |
3197 | * signal internals. This function checks if the required locks are | 3198 | * signal internals. This function checks if the required locks are |
3198 | * available before calling the main signal code, to avoid kdb | 3199 | * available before calling the main signal code, to avoid kdb |
3199 | * deadlocks. | 3200 | * deadlocks. |
3200 | */ | 3201 | */ |
3201 | void | 3202 | void |
3202 | kdb_send_sig_info(struct task_struct *t, struct siginfo *info) | 3203 | kdb_send_sig_info(struct task_struct *t, struct siginfo *info) |
3203 | { | 3204 | { |
3204 | static struct task_struct *kdb_prev_t; | 3205 | static struct task_struct *kdb_prev_t; |
3205 | int sig, new_t; | 3206 | int sig, new_t; |
3206 | if (!spin_trylock(&t->sighand->siglock)) { | 3207 | if (!spin_trylock(&t->sighand->siglock)) { |
3207 | kdb_printf("Can't do kill command now.\n" | 3208 | kdb_printf("Can't do kill command now.\n" |
3208 | "The sigmask lock is held somewhere else in " | 3209 | "The sigmask lock is held somewhere else in " |
3209 | "kernel, try again later\n"); | 3210 | "kernel, try again later\n"); |
3210 | return; | 3211 | return; |
3211 | } | 3212 | } |
3212 | spin_unlock(&t->sighand->siglock); | 3213 | spin_unlock(&t->sighand->siglock); |
3213 | new_t = kdb_prev_t != t; | 3214 | new_t = kdb_prev_t != t; |
3214 | kdb_prev_t = t; | 3215 | kdb_prev_t = t; |
3215 | if (t->state != TASK_RUNNING && new_t) { | 3216 | if (t->state != TASK_RUNNING && new_t) { |
3216 | kdb_printf("Process is not RUNNING, sending a signal from " | 3217 | kdb_printf("Process is not RUNNING, sending a signal from " |
3217 | "kdb risks deadlock\n" | 3218 | "kdb risks deadlock\n" |
3218 | "on the run queue locks. " | 3219 | "on the run queue locks. " |
3219 | "The signal has _not_ been sent.\n" | 3220 | "The signal has _not_ been sent.\n" |
3220 | "Reissue the kill command if you want to risk " | 3221 | "Reissue the kill command if you want to risk " |
3221 | "the deadlock.\n"); | 3222 | "the deadlock.\n"); |
3222 | return; | 3223 | return; |
3223 | } | 3224 | } |
3224 | sig = info->si_signo; | 3225 | sig = info->si_signo; |
3225 | if (send_sig_info(sig, info, t)) | 3226 | if (send_sig_info(sig, info, t)) |
3226 | kdb_printf("Fail to deliver Signal %d to process %d.\n", | 3227 | kdb_printf("Fail to deliver Signal %d to process %d.\n", |
3227 | sig, t->pid); | 3228 | sig, t->pid); |
3228 | else | 3229 | else |
3229 | kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid); | 3230 | kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid); |
3230 | } | 3231 | } |
3231 | #endif /* CONFIG_KGDB_KDB */ | 3232 | #endif /* CONFIG_KGDB_KDB */ |
3232 | 3233 |