/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/mnt_namespace.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/init_task.h>
#include <trace/sched.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include "cred-internals.h"

DEFINE_TRACE(sched_process_free);
DEFINE_TRACE(sched_process_exit);
DEFINE_TRACE(sched_process_wait);

static void exit_mm(struct task_struct * tsk);

static inline int task_detached(struct task_struct *p)
{
	return p->exit_signal == -1;
}
static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		__get_cpu_var(process_counts)--;
	}
	list_del_rcu(&p->thread_group);
	list_del_init(&p->sibling);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *sighand;

	BUG_ON(!sig);
	BUG_ON(!atomic_read(&sig->count));

	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);

	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count))
		posix_cpu_timers_exit_group(tsk);
	else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, task_utime(tsk));
		sig->stime = cputime_add(sig->stime, task_stime(tsk));
		sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->inblock += task_io_get_inblock(tsk);
		sig->oublock += task_io_get_oublock(tsk);
		task_io_accounting_add(&sig->ioac, &tsk->ioac);
		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
		sig = NULL; /* Marker for below. */
	}

	__unhash_process(tsk);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);

	tsk->signal = NULL;
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (sig) {
		flush_sigqueue(&sig->shared_pending);
		taskstats_tgid_free(sig);
		/*
		 * Make sure ->signal can't go away under rq->lock,
		 * see account_group_exec_runtime().
		 */
		task_rq_unlock_wait(tsk);
		__cleanup_signal(sig);
	}
}
static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}
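
/*
 * Aside: delayed_put_task_struct() above, queued with call_rcu() from
 * release_task() below, is an instance of the common RCU deferred-free
 * pattern.  A minimal sketch with illustrative names (struct foo,
 * foo_free_rcu() and foo_release() are made up for this example):
 *
 *	struct foo {
 *		atomic_t refcount;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_free_rcu(struct rcu_head *rhp)
 *	{
 *		struct foo *f = container_of(rhp, struct foo, rcu);
 *		kfree(f);			// runs after the grace period
 *	}
 *
 *	static void foo_release(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcount))
 *			call_rcu(&f->rcu, foo_free_rcu);  // never kfree() here
 *	}
 *
 * Readers that found the object under rcu_read_lock() may keep using it
 * until they drop the read lock; the actual free is deferred past any
 * such reader, which is why release_task() must not free the task_struct
 * directly.
 */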

void release_task(struct task_struct * p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	tracehook_prepare_release_task(p);
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials */
	atomic_dec(&__task_cred(p)->user->processes);

	proc_flush_task(p);
	write_lock_irq(&tasklist_lock);
	tracehook_finish_release_task(p);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(task_detached(leader));
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = task_detached(leader);

		/*
		 * This maintains the invariant that release_task()
		 * only runs on a task in EXIT_DEAD, just for sanity.
		 */
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}
/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
	struct task_struct *p;
	struct pid *sid = NULL;

	p = pid_task(pgrp, PIDTYPE_PGID);
	if (p == NULL)
		p = pid_task(pgrp, PIDTYPE_PID);
	if (p != NULL)
		sid = task_session(p);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}
int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static int has_stopped_jobs(struct pid *pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (!task_is_stopped(p))
			continue;
		retval = 1;
		break;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return retval;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}
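
/*
 * The user-visible effect of kill_orphaned_pgrp() is the POSIX rule that
 * a newly orphaned process group containing a stopped job receives
 * SIGHUP then SIGCONT.  A rough userspace demonstration (illustrative,
 * error handling omitted):
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		if (fork() == 0) {			// middle process
 *			setpgid(0, 0);			// new process group
 *			if (fork() == 0) {		// grandchild
 *				signal(SIGHUP, SIG_IGN); // survive the SIGHUP
 *				raise(SIGSTOP);		// become a stopped job
 *				printf("resumed by SIGCONT\n");
 *				exit(0);
 *			}
 *			sleep(1);	// let the grandchild stop first
 *			exit(0);	// group loses its outside connection
 *		}
 *		sleep(3);
 *		return 0;
 *	}
 *
 * While the middle process lives, its parent (in another group of the
 * same session) keeps the group non-orphaned.  When it exits, the group
 * becomes orphaned with a stopped member, so the kernel sends SIGHUP and
 * SIGCONT to every member, waking the grandchild.
 */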

/**
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to kthreadd so it
 * isn't in the way of other processes and is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
 */
static void reparent_to_kthreadd(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to init */
	current->real_parent = current->parent = kthreadd_task;
	list_move_tail(&current->sibling, &current->real_parent->children);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	current->exit_signal = SIGCHLD;

	if (task_nice(current) < 0)
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));

	atomic_inc(&init_cred.usage);
	commit_creds(&init_cred);
	write_unlock_irq(&tasklist_lock);
}
void __set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;
	pid_t nr = pid_nr(pid);

	if (task_session(curr) != pid) {
		change_pid(curr, PIDTYPE_SID, pid);
		set_task_session(curr, nr);
	}
	if (task_pgrp(curr) != pid) {
		change_pid(curr, PIDTYPE_PGID, pid);
		set_task_pgrp(curr, nr);
	}
}

static void set_special_pids(struct pid *pid)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(pid);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/* Kernel threads handle their own signals.
		   Let the signal code know it'll be handled, so
		   that they don't get converted to SIGKILL or
		   just silently dropped */
		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);

int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */
void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	exit_mm(current);
	/*
	 * We don't want to have TIF_FREEZE set if the system-wide hibernation
	 * or suspend transition begins right now.
	 */
	current->flags |= (PF_NOFREEZE | PF_KTHREAD);

	if (current->nsproxy != &init_nsproxy) {
		get_nsproxy(&init_nsproxy);
		switch_task_namespaces(current, &init_nsproxy);
	}
	set_special_pids(&init_struct_pid);
	proc_clear_tty(current);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */
	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);

	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_kthreadd();
}

EXPORT_SYMBOL(daemonize);
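
/*
 * Typical 2.6-era usage of daemonize() with allow_signal() in a thread
 * started via kernel_thread().  A minimal sketch; my_thread() and its
 * stop condition are illustrative:
 *
 *	static int my_thread(void *unused)
 *	{
 *		daemonize("my_thread");
 *		allow_signal(SIGKILL);	// daemonize() blocked all signals
 *
 *		while (!signal_pending(current)) {
 *			// ... periodic work ...
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 *
 * New code should normally use kthread_run() instead, which creates the
 * thread already detached from user resources and makes both calls
 * unnecessary.
 */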

static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		fdt = files_fdtable(files);
		if (fdt != &files->fdtab)
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
	}
}
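
/*
 * get_files_struct()/put_files_struct() are the reference pair for
 * looking at another task's file table.  Sketch of the intended usage
 * (the fdtable walk shown is illustrative):
 *
 *	struct files_struct *files = get_files_struct(task);
 *
 *	if (files) {
 *		struct fdtable *fdt;
 *
 *		rcu_read_lock();
 *		fdt = files_fdtable(files);
 *		// ... inspect fdt->fd[0 .. fdt->max_fds-1] ...
 *		rcu_read_unlock();
 *		put_files_struct(files);
 *	}
 *
 * close_files() above may walk the table without RCU or ->file_lock only
 * because it runs on the final reference; everyone else must pin the
 * structure first, as sketched here.
 */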

void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

void put_fs_struct(struct fs_struct *fs)
{
	/* No need to hold fs->lock if we are killing it */
	if (atomic_dec_and_test(&fs->count)) {
		path_put(&fs->root);
		path_put(&fs->pwd);
		kmem_cache_free(fs_cachep, fs);
	}
}

void exit_fs(struct task_struct *tsk)
{
	struct fs_struct * fs = tsk->fs;

	if (fs) {
		task_lock(tsk);
		tsk->fs = NULL;
		task_unlock(tsk);
		put_fs_struct(fs);
	}
}

EXPORT_SYMBOL_GPL(exit_fs);
#ifdef CONFIG_MM_OWNER
/*
 * Task p is exiting and it owned mm, let's find a new owner for it
 */
static inline int
mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
{
	/*
	 * If there are other users of the mm and the owner (us) is exiting
	 * we need to find a new owner to take on the responsibility.
	 */
	if (atomic_read(&mm->mm_users) <= 1)
		return 0;
	if (mm->owner != p)
		return 0;
	return 1;
}

void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	if (!mm_need_new_owner(mm, p))
		return;

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else. We should not get
	 * here often
	 */
	do_each_thread(g, c) {
		if (c->mm == mm)
			goto assign_new_owner;
	} while_each_thread(g, c);
	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
	 */
	mm->owner = NULL;
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	mm->owner = c;
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MM_OWNER */

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;
	struct core_state *core_state;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_state
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	core_state = mm->core_state;
	if (core_state) {
		struct core_thread self;
		up_read(&mm->mmap_sem);

		self.task = tsk;
		self.next = xchg(&core_state->dumper.next, &self);
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			if (!self.task) /* see coredump_finish() */
				break;
			schedule();
		}
		__set_task_state(tsk, TASK_RUNNING);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	/* We don't want this task to be frozen prematurely */
	clear_freeze_flag(tsk);
	task_unlock(tsk);
	mm_update_next_owner(mm);
	mmput(mm);
}
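
/*
 * Note on the coredump handshake in exit_mm() above: each exiting thread
 * pushes itself onto core_state->dumper with a single xchg(), and the
 * thread that drops ->nr_threads to zero completes ->startup, telling
 * the dumper that all users of the mm are parked.  The push is the
 * generic lock-free list prepend; sketched with illustrative names:
 *
 *	struct node { struct node *next; };
 *	struct node *head;		// shared list head
 *
 *	void push(struct node *self)
 *	{
 *		self->next = xchg(&head, self);	// old head becomes our next
 *	}
 *
 * Because xchg() implies full memory barriers, a node's fields (here,
 * self.task) are guaranteed visible to whoever later walks the list.
 */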

/*
 * Return nonzero if @parent's children should reap themselves.
 *
 * Called with write_lock_irq(&tasklist_lock) held.
 */
static int ignoring_children(struct task_struct *parent)
{
	int ret;
	struct sighand_struct *psig = parent->sighand;
	unsigned long flags;
	spin_lock_irqsave(&psig->siglock, flags);
	ret = (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	       (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT));
	spin_unlock_irqrestore(&psig->siglock, flags);
	return ret;
}

/*
 * Detach all tasks we were using ptrace on.
 * Any that need to be release_task'd are put on the @dead list.
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static void ptrace_exit(struct task_struct *parent, struct list_head *dead)
{
	struct task_struct *p, *n;
	int ign = -1;

	list_for_each_entry_safe(p, n, &parent->ptraced, ptrace_entry) {
		__ptrace_unlink(p);

		if (p->exit_state != EXIT_ZOMBIE)
			continue;

		/*
		 * If it's a zombie, our attachedness prevented normal
		 * parent notification or self-reaping.  Do notification
		 * now if it would have happened earlier.  If it should
		 * reap itself, add it to the @dead list.  We can't call
		 * release_task() here because we already hold tasklist_lock.
		 *
		 * If it's our own child, there is no notification to do.
		 * But if our normal children self-reap, then this child
		 * was prevented by ptrace and we must reap it now.
		 */
		if (!task_detached(p) && thread_group_empty(p)) {
			if (!same_thread_group(p->real_parent, parent))
				do_notify_parent(p, p->exit_signal);
			else {
				if (ign < 0)
					ign = ignoring_children(parent);
				if (ign)
					p->exit_signal = -1;
			}
		}

		if (task_detached(p)) {
			/*
			 * Mark it as in the process of being reaped.
			 */
			p->exit_state = EXIT_DEAD;
			list_add(&p->ptrace_entry, dead);
		}
	}
}

/*
 * Finish up exit-time ptrace cleanup.
 *
 * Called without locks.
 */
static void ptrace_exit_finish(struct task_struct *parent,
			       struct list_head *dead)
{
	struct task_struct *p, *n;

	BUG_ON(!list_empty(&parent->ptraced));

	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}

static void reparent_thread(struct task_struct *p, struct task_struct *father)
{
	if (p->pdeath_signal)
		/* We already hold the tasklist_lock here.  */
		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

	list_move_tail(&p->sibling, &p->real_parent->children);

	/* If this is a threaded reparent there is no need to
	 * notify anyone anything has happened.
	 */
	if (same_thread_group(p->real_parent, father))
		return;

	/* We don't want people slaying init.  */
	if (!task_detached(p))
		p->exit_signal = SIGCHLD;

	/* If we'd notified the old parent about this child's death,
	 * also notify the new parent.
	 */
	if (!ptrace_reparented(p) &&
	    p->exit_state == EXIT_ZOMBIE &&
	    !task_detached(p) && thread_group_empty(p))
		do_notify_parent(p, p->exit_signal);

	kill_orphaned_pgrp(p, father);
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static struct task_struct *find_new_reaper(struct task_struct *father)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *thread;

	thread = father;
	while_each_thread(father, thread) {
		if (thread->flags & PF_EXITING)
			continue;
		if (unlikely(pid_ns->child_reaper == father))
			pid_ns->child_reaper = thread;
		return thread;
	}

	if (unlikely(pid_ns->child_reaper == father)) {
		write_unlock_irq(&tasklist_lock);
		if (unlikely(pid_ns == &init_pid_ns))
			panic("Attempted to kill init!");

		zap_pid_ns_processes(pid_ns);
		write_lock_irq(&tasklist_lock);
		/*
		 * We can not clear ->child_reaper or leave it alone.
		 * There may be stealth EXIT_DEAD tasks on ->children,
		 * forget_original_parent() must move them somewhere.
		 */
		pid_ns->child_reaper = init_pid_ns.child_reaper;
	}

	return pid_ns->child_reaper;
}
static void forget_original_parent(struct task_struct *father)
{
	struct task_struct *p, *n, *reaper;
	LIST_HEAD(ptrace_dead);

	write_lock_irq(&tasklist_lock);
	reaper = find_new_reaper(father);
	/*
	 * First clean up ptrace if we were using it.
	 */
	ptrace_exit(father, &ptrace_dead);

	list_for_each_entry_safe(p, n, &father->children, sibling) {
		p->real_parent = reaper;
		if (p->parent == father) {
			BUG_ON(p->ptrace);
			p->parent = p->real_parent;
		}
		reparent_thread(p, father);
	}

	write_unlock_irq(&tasklist_lock);
	BUG_ON(!list_empty(&father->children));

	ptrace_exit_finish(father, &ptrace_dead);
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	int signal;
	void *cookie;

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */
	forget_original_parent(tsk);
	exit_task_namespaces(tsk);

	write_lock_irq(&tasklist_lock);
	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */
	if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
	    (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
	     tsk->self_exec_id != tsk->parent_exec_id) &&
	    !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;

	signal = tracehook_notify_death(tsk, &cookie, group_dead);
	if (signal >= 0)
		signal = do_notify_parent(tsk, signal);

	tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;

	/* mt-exec, de_thread() is waiting for us */
	if (thread_group_leader(tsk) &&
	    tsk->signal->group_exit_task &&
	    tsk->signal->notify_count < 0)
		wake_up_process(tsk->signal->group_exit_task);
	write_unlock_irq(&tasklist_lock);

	tracehook_report_death(tsk, signal, cookie, group_dead);

	/* If the process is dead, release it - nobody will wait for it */
	if (signal == DEATH_REAP)
		release_task(tsk);
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
				"left\n",
				current->comm, free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	tracehook_report_exit(&code);

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		/*
		 * We can do this unlocked here. The futex code uses
		 * this flag just to verify whether the pi state
		 * cleanup has been done or not. In the worst case it
		 * loops once more. We pretend that the cleanup was
		 * done as there is no way to return. Either the
		 * OWNER_DIED bit is set by now or we push the blocked
		 * task into the wait for ever nirvana as well.
		 */
		tsk->flags |= PF_EXITPIDONE;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	exit_signals(tsk);  /* sets PF_EXITING */
	/*
	 * tsk->flags are checked in the futex code to protect against
	 * an exiting task cleaning up the robust pi futexes.
	 */
	smp_mb();
	spin_unlock_wait(&tsk->pi_lock);

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, task_pid_nr(current),
				preempt_count());

	acct_update_integrals(tsk);

	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	if (unlikely(tsk->audit_context))
		audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	check_stack_usage();
	exit_thread();
	cgroup_exit(tsk, 1);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	proc_exit_connector(tsk);
	exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
	mpol_put(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif
#ifdef CONFIG_FUTEX
	/*
	 * This must happen late, after the PID is not
	 * hashed anymore:
	 */
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held(tsk);
	/*
	 * We can do this unlocked here. The futex code uses this flag
	 * just to verify whether the pi state cleanup has been done
	 * or not. In the worst case it loops once more.
	 */
	tsk->flags |= PF_EXITPIDONE;

	if (tsk->io_context)
		exit_io_context();

	if (tsk->splice_pipe)
		__free_pipe_info(tsk->splice_pipe);

	preempt_disable();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;
	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (signal_group_exit(sig))
		exit_code = sig->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct sighand_struct *const sighand = current->sighand;
		spin_lock_irq(&sighand->siglock);
		if (signal_group_exit(sig))
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
	do_group_exit((error_code & 0xff) << 8);
	/* NOTREACHED */
	return 0;
}
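
/*
 * The sys_exit/sys_exit_group distinction is visible from userspace in
 * threaded programs.  Illustrative fragment:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	syscall(SYS_exit, 0);		// terminates the calling thread only
 *	syscall(SYS_exit_group, 0);	// terminates the whole thread group
 *
 * The C library's exit() and _exit() use exit_group on modern kernels,
 * so a bare SYS_exit is normally seen only from threading libraries and
 * raw syscall users.
 */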

static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	struct pid *pid = NULL;
	if (type == PIDTYPE_PID)
		pid = task->pids[type].pid;
	else if (type < PIDTYPE_MAX)
		pid = task->group_leader->pids[type].pid;
	return pid;
}

static int eligible_child(enum pid_type type, struct pid *pid, int options,
			  struct task_struct *p)
{
	int err;

	if (type < PIDTYPE_MAX) {
		if (task_pid_type(p, type) != pid)
			return 0;
	}

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
	    && !(options & __WALL))
		return 0;

	err = security_task_wait(p);
	if (err)
		return err;

	return 1;
}
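
/*
 * The "clone child" test above is why __WCLONE and __WALL exist: a child
 * created without SIGCHLD as its termination signal is invisible to a
 * plain waitpid().  Illustrative userspace fragment (child_fn and
 * stack_top are assumed to be set up by the caller; _GNU_SOURCE is
 * needed for the flags):
 *
 *	#include <sched.h>
 *	#include <sys/wait.h>
 *
 *	// No termination signal in the low flag byte => a "clone" child.
 *	pid_t pid = clone(child_fn, stack_top, CLONE_VM, NULL);
 *	int status;
 *
 *	waitpid(pid, &status, 0);		// fails: ECHILD
 *	waitpid(pid, &status, __WCLONE);	// reaps it
 *	// (or __WALL to wait for clone and non-clone children alike)
 */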

static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
			       int why, int status,
			       struct siginfo __user *infop,
			       struct rusage __user *rusagep)
{
	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;

	put_task_struct(p);
	if (!retval)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval)
		retval = put_user(0, &infop->si_errno);
	if (!retval)
		retval = put_user((short)why, &infop->si_code);
	if (!retval)
		retval = put_user(pid, &infop->si_pid);
	if (!retval)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = put_user(status, &infop->si_status);
	if (!retval)
		retval = pid;
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct task_struct *p, int options,
			    struct siginfo __user *infop,
			    int __user *stat_addr, struct rusage __user *ru)
{
	unsigned long state;
	int retval, status, traced;
	pid_t pid = task_pid_vnr(p);
	uid_t uid = __task_cred(p)->uid;

	if (!likely(options & WEXITED))
		return 0;

	if (unlikely(options & WNOWAIT)) {
		int exit_code = p->exit_code;
		int why, status;

		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(p, pid, uid, why,
					   status, infop, ru);
	}

	/*
	 * Try to move the task's state to DEAD
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}

	traced = ptrace_reparented(p);

	if (likely(!traced)) {
		struct signal_struct *psig;
		struct signal_struct *sig;
		struct task_cputime cputime;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to p->parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 *
		 * We use thread_group_cputime() to get times for the thread
		 * group, which consolidates times for all threads in the
		 * group including the group leader.
		 */
		thread_group_cputime(p, &cputime);
		spin_lock_irq(&p->parent->sighand->siglock);
		psig = p->parent->signal;
		sig = p->signal;
		psig->cutime =
			cputime_add(psig->cutime,
			cputime_add(cputime.utime,
				    sig->cutime));
		psig->cstime =
			cputime_add(psig->cstime,
			cputime_add(cputime.stime,
				    sig->cstime));
		psig->cgtime =
			cputime_add(psig->cgtime,
			cputime_add(p->gtime,
			cputime_add(sig->gtime,
				    sig->cgtime)));
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw += p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw += p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		spin_unlock_irq(&p->parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && stat_addr)
		retval = put_user(status, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;

	if (traced) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);
		/*
		 * If this is not a detached task, notify the parent.
		 * If it's still not detached after that, don't release
		 * it now.
		 */
		if (!task_detached(p)) {
			do_notify_parent(p, p->exit_signal);
			if (!task_detached(p)) {
				p->exit_state = EXIT_ZOMBIE;
				p = NULL;
			}
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);

	return retval;
}

/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(int ptrace, struct task_struct *p,
			     int options, struct siginfo __user *infop,
			     int __user *stat_addr, struct rusage __user *ru)
{
	int retval, exit_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	if (!(options & WUNTRACED))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	if (unlikely(!task_is_stopped_or_traced(p)))
		goto unlock_sig;

	if (!ptrace && p->signal->group_stop_count > 0)
		/*
		 * A group stop is in progress and this is the group leader.
		 * We won't report until all threads have stopped.
		 */
		goto unlock_sig;

	exit_code = p->exit_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(options & WNOWAIT))
		p->exit_code = 0;

	/* don't need the RCU readlock here as we're holding a spinlock */
	uid = __task_cred(p)->uid;
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);

	if (unlikely(options & WNOWAIT))
		return wait_noreap_copyout(p, pid, uid,
					   why, exit_code,
					   infop, ru);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	if (!retval && stat_addr)
		retval = put_user((exit_code << 8) | 0x7f, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)why, &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct task_struct *p, int options,
			       struct siginfo __user *infop,
			       int __user *stat_addr, struct rusage __user *ru)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (!unlikely(options & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(options & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = __task_cred(p)->uid;
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!infop) {
		retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
		put_task_struct(p);
		if (!retval && stat_addr)
			retval = put_user(0xffff, stat_addr);
		if (!retval)
			retval = pid;
	} else {
		retval = wait_noreap_copyout(p, pid, uid,
					     CLD_CONTINUED, SIGCONT,
					     infop, ru);
		BUG_ON(retval == 0);
	}

	return retval;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in *@notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then *@notask_error is 0 if @p is an eligible child,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int wait_consider_task(struct task_struct *parent, int ptrace,
			      struct task_struct *p, int *notask_error,
			      enum pid_type type, struct pid *pid, int options,
			      struct siginfo __user *infop,
			      int __user *stat_addr, struct rusage __user *ru)
{
	int ret = eligible_child(type, pid, options, p);
	if (!ret)
		return ret;

	if (unlikely(ret < 0)) {
		/*
		 * If we have not yet seen any eligible child,
		 * then let this error code replace -ECHILD.
		 * A permission error will give the user a clue
		 * to look for security policy problems, rather
		 * than for mysterious wait bugs.
		 */
		if (*notask_error)
			*notask_error = ret;
	}

	if (likely(!ptrace) && unlikely(p->ptrace)) {
		/*
		 * This child is hidden by ptrace.
		 * We aren't allowed to see it now, but eventually we will.
		 */
		*notask_error = 0;
		return 0;
	}

	if (p->exit_state == EXIT_DEAD)
		return 0;

	/*
	 * We don't reap group leaders with subthreads.
	 */
	if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p))
		return wait_task_zombie(p, options, infop, stat_addr, ru);

	/*
	 * It's stopped or running now, so it might
	 * later continue, exit, or stop again.
	 */
	*notask_error = 0;

	if (task_is_stopped_or_traced(p))
		return wait_task_stopped(ptrace, p, options,
					 infop, stat_addr, ru);

	return wait_task_continued(p, options, infop, stat_addr, ru);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in *@notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * *@notask_error is 0 if there were any eligible children,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int do_wait_thread(struct task_struct *tsk, int *notask_error,
			  enum pid_type type, struct pid *pid, int options,
			  struct siginfo __user *infop, int __user *stat_addr,
			  struct rusage __user *ru)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		/*
		 * Do not consider detached threads.
		 */
		if (!task_detached(p)) {
			int ret = wait_consider_task(tsk, 0, p, notask_error,
						     type, pid, options,
						     infop, stat_addr, ru);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int ptrace_do_wait(struct task_struct *tsk, int *notask_error,
			  enum pid_type type, struct pid *pid, int options,
			  struct siginfo __user *infop, int __user *stat_addr,
			  struct rusage __user *ru)
{
	struct task_struct *p;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	options |= WUNTRACED;

	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(tsk, 1, p, notask_error,
					     type, pid, options,
					     infop, stat_addr, ru);
		if (ret)
			return ret;
	}

	return 0;
}

static long do_wait(enum pid_type type, struct pid *pid, int options,
		    struct siginfo __user *infop, int __user *stat_addr,
		    struct rusage __user *ru)
{
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;
	int retval;

	trace_sched_process_wait(pid);

	add_wait_queue(&current->signal->wait_chldexit, &wait);
repeat:
	/*
	 * If there is nothing that can match our criteria just get out.
	 * We will clear @retval to zero if we see any child that might later
	 * match our criteria, even if we are not able to reap it yet.
	 */
	retval = -ECHILD;
	if ((type < PIDTYPE_MAX) && (!pid || hlist_empty(&pid->tasks[type])))
		goto end;

	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		int tsk_result = do_wait_thread(tsk, &retval,
						type, pid, options,
						infop, stat_addr, ru);
		if (!tsk_result)
			tsk_result = ptrace_do_wait(tsk, &retval,
						    type, pid, options,
						    infop, stat_addr, ru);
		if (tsk_result) {
			/*
			 * tasklist_lock is unlocked and we have a final result.
			 */
			retval = tsk_result;
			goto end;
		}

		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
		BUG_ON(tsk->signal != current->signal);
	} while (tsk != current);
	read_unlock(&tasklist_lock);

	if (!retval && !(options & WNOHANG)) {
		retval = -ERESTARTSYS;
		if (!signal_pending(current)) {
			schedule();
			goto repeat;
		}
	}

end:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->signal->wait_chldexit, &wait);
	if (infop) {
		if (retval > 0)
			retval = 0;
		else {
			/*
			 * For a WNOHANG return, clear out all the fields
			 * we would set so the user can easily tell the
			 * difference.
			 */
			if (!retval)
				retval = put_user(0, &infop->si_signo);
			if (!retval)
				retval = put_user(0, &infop->si_errno);
			if (!retval)
				retval = put_user(0, &infop->si_code);
			if (!retval)
				retval = put_user(0, &infop->si_pid);
			if (!retval)
				retval = put_user(0, &infop->si_uid);
			if (!retval)
				retval = put_user(0, &infop->si_status);
		}
	}
	return retval;
}

SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
		infop, int, options, struct rusage __user *, ru)
{
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (type < PIDTYPE_MAX)
		pid = find_get_pid(upid);
	ret = do_wait(type, pid, options, infop, NULL, ru);
	put_pid(pid);

	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(5, ret, which, upid, infop, options, ru);
	return ret;
}

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
		int, options, struct rusage __user *, ru)
{
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	if (upid == -1)
		type = PIDTYPE_MAX;
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_pid(task_pgrp(current));
	} else /* upid > 0 */ {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	ret = do_wait(type, pid, options | WEXITED, NULL, stat_addr, ru);
	put_pid(pid);

	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(4, ret, upid, stat_addr, options, ru);
	return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility.
 * waitpid() should be implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif
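
/*
 * Userspace view of the waitid() path above; WNOWAIT is handled in
 * wait_task_zombie() by copying the status out without reaping.
 * Illustrative fragment:
 *
 *	#include <sys/wait.h>
 *
 *	siginfo_t info;
 *
 *	waitid(P_PID, pid, &info, WEXITED | WNOWAIT);	// peek: the child
 *							// stays a zombie
 *	// info.si_pid, info.si_code, info.si_status describe the exit
 *	waitid(P_PID, pid, &info, WEXITED);		// now reap it
 */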