kernel/signal.c
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig,
		int from_ancestor_ns)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL && !from_ancestor_ns)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, from_ancestor_ns))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !t->ptrace;
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
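/*
 * Editor's note (illustrative, not in the original source): on x86-64,
 * _NSIG_WORDS == 1, so PENDING() reduces to a single AND-NOT over one
 * word.  E.g. with SIGINT (2) and SIGTERM (15) pending and SIGINT
 * blocked (bit positions are signr - 1):
 *
 *	signal->sig[0]  = (1UL << 1) | (1UL << 14);
 *	blocked->sig[0] = (1UL << 1);
 *	ready = signal &~ blocked = (1UL << 14);   // non-zero: TIF_SIGPENDING
 */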
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
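/*
 * Editor's note (illustrative, not in the original source): the
 * SYNCHRONOUS_MASK filter gives trap-like signals priority over
 * lower-numbered ones.  With SIGHUP (1) and SIGSEGV (11) both pending
 * and nothing blocked, x has bits 0 and 10 set; x & SYNCHRONOUS_MASK
 * keeps only bit 10, so ffz(~x) + 1 returns SIGSEGV, not SIGHUP.
 */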
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
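/*
 * Editor's note (illustrative, not in the original source): a typical
 * caller is the ptrace attach path, which schedules a stop trap for the
 * tracee roughly like this, under siglock:
 *
 *	task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING);
 *
 * If the tracee is already dying or exiting, the call returns false and
 * no trap is scheduled.
 */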
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		sig->flags = SIGNAL_STOP_STOPPED;
		return true;
	}
	return false;
}
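/*
 * Editor's note (illustrative, not in the original source): for a process
 * with three running threads receiving SIGSTOP, group_stop_count is set to
 * the number of threads that still need to stop.  Each thread consumes one
 * count through this function as it stops, and only the thread that drops
 * the count to zero returns true and triggers the CLD_STOPPED notification
 * to the parent.
 */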
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */
void flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return 1;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

/*
 * Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */
void unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
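/*
 * Editor's note (illustrative, not in the original source): this pair is
 * assumed to be used bracket-style; historically the main in-tree user was
 * the legacy DRM lock code, which blocked signals while a client held the
 * hardware lock:
 *
 *	block_all_signals(my_notifier, my_data, &my_sigmask);
 *	... critical section ...
 *	unblock_all_signals();
 *
 * (my_notifier, my_data and my_sigmask are hypothetical names.)
 */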
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
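/*
 * Editor's note (illustrative, not in the original source): the main
 * consumer of dequeue_signal() is the signal-delivery loop (see
 * get_signal_to_deliver() later in this file), which calls it with the
 * task's blocked set as the mask, roughly:
 *
 *	signr = dequeue_signal(current, &current->blocked, info);
 */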
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
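/*
 * Editor's note (illustrative, not in the original source): the two
 * helpers above differ only in how the mask is expressed.  rm_from_queue()
 * takes a plain bitmask and therefore only covers the first _NSIG_BPW
 * signals (it is used below with masks like sigmask(SIGCONT) and
 * SIG_KERNEL_STOP_MASK), while rm_from_queue_full() takes a full sigset_t
 * and can remove real-time signals as well.
 */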
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (cred->user->user_ns == tcred->user->user_ns &&
	    (cred->euid == tcred->suid ||
	     cred->euid == tcred->uid ||
	     cred->uid  == tcred->suid ||
	     cred->uid  == tcred->uid))
		return 1;

	if (ns_capable(tcred->user->user_ns, CAP_KILL))
		return 1;

	return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, from_ancestor_ns);
}
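/*
 * Editor's note (illustrative, not in the original source): the
 * stop/continue bookkeeping above makes the two classes of signals cancel
 * each other at generation time.  E.g. if SIGSTOP is pending and a SIGCONT
 * arrives, the pending SIGSTOP is removed from every queue and stopped
 * threads are woken, even if SIGCONT itself is blocked or ignored.
 */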
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;

	trace_signal_generate(sig, info, t);

	assert_spin_locked(&t->sighand->siglock);

	if (!prepare_signal(sig, t, from_ancestor_ns))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (legacy_queue(pending, sig))
		return 0;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = current_uid();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			trace_signal_overflow_fail(sig, group, info);
			return -EAGAIN;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			trace_signal_lose_info(sig, group, info);
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}
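/*
 * Editor's note (illustrative, not in the original source): legacy_queue()
 * is what gives classic signals their "at most one pending instance"
 * behaviour.  Sending SIGUSR1 twice before the target runs results in a
 * single delivery, whereas two sigqueue() sends of SIGRTMIN queue two
 * separate entries, each with its own siginfo.
 */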
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
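/*
 * Editor's note (illustrative, not in the original source): force_sig_info()
 * is what architecture fault handlers use when the kernel itself must trap
 * or kill a task.  If a program installs SIG_IGN for SIGSEGV and then
 * dereferences a bad pointer, the handler is reset to SIG_DFL here so the
 * fault cannot simply be ignored.
 */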
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}

		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
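/*
 * Editor's note (illustrative, not in the original source): the retry loop
 * above exists because ->sighand can be swapped under us (e.g. during exec
 * or exit); the lock is only valid if ->sighand still points at the
 * structure we locked.  Callers use the lock_task_sighand() wrapper
 * bracket-style, as do_send_sig_info() above does:
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		... p->sighand->siglock held, p cannot release it ...
 *		unlock_task_sighand(p, &flags);
 *	}
 */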
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);
	if (cred->user_ns != pcred->user_ns)
		return 0;
	if (cred->euid != pcred->suid && cred->euid != pcred->uid &&
	    cred->uid  != pcred->suid && cred->uid  != pcred->uid)
		return 0;
	return 1;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
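/*
 * Editor's note (illustrative, not in the original source): kill_pgrp() is
 * the interface tty line disciplines use for job control, e.g. sending the
 * interrupt character's signal to the foreground process group, roughly:
 *
 *	kill_pgrp(tty->pgrp, SIGINT, 1);
 *
 * (The exact call site in the tty layer varies by kernel version.)
 */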
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
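/*
 * Editor's note (illustrative, not in the original source): the expected
 * lifecycle, as used by the posix-timers code, is roughly:
 *
 *	q = sigqueue_alloc();              // at timer_create(), may fail
 *	...
 *	send_sigqueue(q, task, group);     // each time the timer fires
 *	...
 *	sigqueue_free(q);                  // at timer_delete()
 *
 * Preallocation means a firing timer never has to allocate memory, so the
 * notification cannot be dropped under memory pressure.
 */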
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t, 0))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * The only thing it can do is switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we will always
	 * see the relevant namespace.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
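/*
 * Editor's note (illustrative, not in the original source): the return
 * value drives reaping in the exit path.  With the default SIGCHLD
 * disposition this returns false and the child lingers as a zombie until
 * the parent calls wait4(); with SIGCHLD set to SIG_IGN (or SA_NOCLDWAIT)
 * it returns true and the exiting task reaps itself.
 */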
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent, which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(tsk->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
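/*
 * Illustrative userspace sketch (editorial addition): the SA_NOCLDSTOP
 * test above is the switch this flag flips -- a parent that sets it
 * still sees stop/continue events via waitpid(WUNTRACED|WCONTINUED) but
 * receives no SIGCHLD for them.  on_chld() is a hypothetical handler.
 */
#if 0 /* example only, never compiled */
#include <signal.h>
#include <stddef.h>

static void on_chld(int sig) { (void)sig; }

static int install_sigchld_no_stop(void)
{
	struct sigaction sa;

	sa.sa_handler = on_chld;
	sa.sa_flags = SA_NOCLDSTOP;	/* SIGCHLD only for termination */
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGCHLD, &sa, NULL);
}
#endif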
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so, and our tracer is also part of the coredump, stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL.  Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	memset(&info, 0, sizeof info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current_uid();

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
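/*
 * Illustrative tracer-side sketch (editorial addition): ptrace_notify()
 * above runs in the tracee; the tracer observes the result as a child
 * stopped with SIGTRAP.  Assumes a fork()ed child that already called
 * ptrace(PTRACE_TRACEME, ...); wait_for_trap() is a hypothetical helper.
 */
#if 0 /* example only, never compiled */
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void wait_for_trap(pid_t child)
{
	int status;

	waitpid(child, &status, 0);
	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP)
		printf("tracee is sitting in ptrace_stop()\n");
	ptrace(PTRACE_CONT, child, NULL, NULL);	/* resume the tracee */
}
#endif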
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		for (t = next_thread(current); t != current;
		     t = next_thread(t)) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		__set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach in between; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}
static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	ptrace_signal_deliver(regs, cookie);
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether the debugger will
	 * change signr.  This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop().  In that case it will
	 * be checked in do_signal_stop(); we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping.  See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig? */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = task_uid(current->parent);
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

relock:
	/*
	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
	 * While in TASK_STOPPED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup.  Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make a whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						 true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
			do_jobctl_trap();
			spin_unlock_irq(&sighand->siglock);
			goto relock;
		}

		signr = dequeue_signal(current, &current->blocked, info);

		if (!signr)
			break; /* will return 0 */

		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, info,
					      regs, cookie);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace.  In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(info->si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(regs, info->si_signo);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(info->si_signo, info->si_signo, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(info->si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
	return signr;
}
/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal.  Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);

		if (sigisemptyset(&retarget))
			break;
	}
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */
/**
 * sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}

/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless, only current can change ->blocked, never from irq */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	set_current_blocked(&newset);
	return 0;
}
/**
 * sys_rt_sigprocmask - change the list of currently blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: new signal mask to apply, if non-null
 * @oset: previous value of signal mask if non-null
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}
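/*
 * Illustrative userspace sketch (editorial addition): sigprocmask(2)
 * and sigpending(2) land in sys_rt_sigprocmask() and do_sigpending()
 * above.  usr1_pending() is a hypothetical helper showing a signal
 * staying queued while blocked.
 */
#if 0 /* example only, never compiled */
#include <signal.h>

static int usr1_pending(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);	/* -> sys_rt_sigprocmask */

	raise(SIGUSR1);		/* stays pending, not delivered */

	sigpending(&pending);			/* -> do_sigpending */
	return sigismember(&pending, SIGUSR1);	/* expect 1 */
}
#endif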
/**
 * sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 * @set: stores pending signals
 * @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
/**
 * do_sigtimedwait - wait for queued signals specified in @which
 * @which: queued signals to wait for
 * @info: if non-null, the signal's siginfo is returned here
 * @ts: upper bound on process time suspension
 */
int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
			const struct timespec *ts)
{
	struct task_struct *tsk = current;
	long timeout = MAX_SCHEDULE_TIMEOUT;
	sigset_t mask = *which;
	int sig;

	if (ts) {
		if (!timespec_valid(ts))
			return -EINVAL;
		timeout = timespec_to_jiffies(ts);
		/*
		 * We can be close to the next tick, add another one
		 * to ensure we will wait at least the time asked for.
		 */
		if (ts->tv_sec || ts->tv_nsec)
			timeout++;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	if (!sig && timeout) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * in while we are sleeping so that we'll be awakened when
		 * they arrive.  Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		timeout = schedule_timeout_interruptible(timeout);

		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		siginitset(&tsk->real_blocked, 0);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return timeout ? -EINTR : -EAGAIN;
}
/**
 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 * @uthese: queued signals to wait for
 * @uinfo: if non-null, the signal's siginfo is returned here
 * @uts: upper bound on process time suspension
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
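/*
 * Illustrative userspace sketch (editorial addition): the usual pattern
 * for sigtimedwait(2), which reaches do_sigtimedwait() above.  The
 * signal must be blocked first, or it would be delivered asynchronously
 * instead of being dequeued here.  wait_for_term() is hypothetical.
 */
#if 0 /* example only, never compiled */
#include <signal.h>
#include <time.h>

static int wait_for_term(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGTERM);
	sigprocmask(SIG_BLOCK, &set, NULL);

	/* > 0: signal number; -1 with EAGAIN on timeout, EINTR otherwise */
	return sigtimedwait(&set, &info, &ts);
}
#endif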
/**
 * sys_kill - send a signal to a process
 * @pid: the PID of the process
 * @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current_uid();

	return kill_something_info(sig, &info, pid);
}
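/*
 * Illustrative userspace sketch (editorial addition): the "null signal"
 * path documented in do_send_specific() below also applies here -- sig
 * == 0 performs only the permission and existence checks.
 * process_exists() is a hypothetical helper built on that behavior.
 */
#if 0 /* example only, never compiled */
#include <errno.h>
#include <signal.h>
#include <sys/types.h>

static int process_exists(pid_t pid)
{
	if (kill(pid, 0) == 0)
		return 1;		/* exists, and we may signal it */
	return errno == EPERM;		/* exists, but not ours */
}
#endif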
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal.  The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}

static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current_uid();

	return do_send_specific(tgid, pid, sig, &info);
}
/**
 * sys_tgkill - send signal to one specific thread
 * @tgid: the thread group ID of the thread
 * @pid: the PID of the thread
 * @sig: signal to be sent
 *
 * This syscall also checks the @tgid and returns -ESRCH even if the PID
 * exists but no longer belongs to the target process.  This method
 * solves the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/**
 * sys_tkill - send signal to one specific task
 * @pid: the PID of the task
 * @sig: signal to be sent
 *
 * Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
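/*
 * Illustrative userspace sketch (editorial addition): glibc historically
 * shipped no tgkill() wrapper, so callers typically go through
 * syscall(2); pthread_kill() uses the same kernel entry point
 * internally.  send_to_thread() is a hypothetical helper.
 */
#if 0 /* example only, never compiled */
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int send_to_thread(pid_t tgid, pid_t tid, int sig)
{
	return syscall(SYS_tgkill, tgid, tid, sig);
}
#endif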
/**
 * sys_rt_sigqueueinfo - send signal information to a process
 * @pid: the PID of the process
 * @sig: signal to be sent
 * @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if (info.si_code >= 0 || info.si_code == SI_TKILL) {
		/* We used to allow any < 0 si_code */
		WARN_ON_ONCE(info.si_code < 0);
		return -EPERM;
	}
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}

long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if (info->si_code >= 0 || info->si_code == SI_TKILL) {
		/* We used to allow any < 0 si_code */
		WARN_ON_ONCE(info->si_code < 0);
		return -EPERM;
	}
	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
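/*
 * Illustrative userspace sketch (editorial addition): userspace reaches
 * sys_rt_sigqueueinfo() through sigqueue(3), which fills in si_code =
 * SI_QUEUE (a negative value); hand-rolled callers passing si_code >= 0
 * trip the -EPERM check above.  queue_value() is a hypothetical helper.
 */
#if 0 /* example only, never compiled */
#include <signal.h>
#include <sys/types.h>

static int queue_value(pid_t pid, int val)
{
	union sigval sv;

	sv.sival_int = val;	/* payload visible in si_value */
	return sigqueue(pid, SIGUSR1, sv);
}
#endif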
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

int do_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
		   unsigned long sp)
{
	stack_t oss;
	int error;

	oss.ss_sp = (void __user *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp);

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 *	  old code may have been written using ss_flags==0
		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
		 *	  way that worked) - this fix preserves that older
		 *	  mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}
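/*
 * Illustrative userspace sketch (editorial addition): sigaltstack(2) is
 * backed by do_sigaltstack() above.  A handler only runs on the
 * alternate stack if installed with SA_ONSTACK; SIGSTKSZ satisfies the
 * MINSIGSTKSZ check.  install_alt_stack() is a hypothetical helper.
 */
#if 0 /* example only, never compiled */
#include <signal.h>
#include <stdlib.h>

static int install_alt_stack(void)
{
	stack_t ss;

	ss.ss_sp = malloc(SIGSTKSZ);
	if (!ss.ss_sp)
		return -1;
	ss.ss_size = SIGSTKSZ;	/* below MINSIGSTKSZ => -ENOMEM */
	ss.ss_flags = 0;
	return sigaltstack(&ss, NULL);
}
#endif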
#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 * sys_sigpending - examine pending signals
 * @set: where the mask of pending signals is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 * sys_sigprocmask - examine and change blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: signals to add or remove (if non-null)
 * @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
/**
 * sys_rt_sigaction - alter an action taken by a process
 * @sig: signal whose action is to be changed
 * @act: new sigaction
 * @oact: used to save the previous sigaction
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
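/*
 * Illustrative userspace sketch (editorial addition): sigaction(2) ends
 * up in do_sigaction() via sys_rt_sigaction above.  SA_RESETHAND is the
 * same bit the old signal(2) emulation below sets as SA_ONESHOT.
 * install_oneshot() and on_int() are hypothetical.
 */
#if 0 /* example only, never compiled */
#include <signal.h>
#include <stddef.h>

static void on_int(int sig) { (void)sig; }

static int install_oneshot(void)
{
	struct sigaction sa;

	sa.sa_handler = on_int;
	sa.sa_flags = SA_RESETHAND;	/* back to SIG_DFL after one catch */
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGINT, &sa, NULL);
}
#endif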
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask & ~(sigmask(SIGKILL) | sigmask(SIGSTOP)));
	set_current_blocked(&newset);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif
#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
/**
 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *	until a signal is received
 * @unewset: new signal mask value
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	current->saved_sigmask = current->blocked;
	set_current_blocked(&newset);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
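/*
 * Illustrative userspace sketch (editorial addition): sigsuspend(2),
 * served by sys_rt_sigsuspend above, atomically swaps the blocked mask
 * and sleeps, replacing the racy sigprocmask()+pause() pair.
 * wait_with_mask() is a hypothetical helper.
 */
#if 0 /* example only, never compiled */
#include <signal.h>

static void wait_with_mask(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);

	/* ... test a predicate set by the SIGUSR1 handler ... */

	sigsuspend(&old);	/* returns -1 with errno == EINTR */
}
#endif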
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;

	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */