/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !force)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, force))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !t->ptrace;
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
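
/*
 * Illustrative caller sketch (an assumption, not part of this file): code
 * that changes ->blocked re-runs the recalc_sigpending() check below under
 * the siglock, e.g. a sigprocmask()-style path does roughly:
 *
 *	spin_lock_irq(&tsk->sighand->siglock);
 *	tsk->blocked = *newset;
 *	recalc_sigpending();
 *	spin_unlock_irq(&tsk->sighand->siglock);
 */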
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */
#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
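
/*
 * Worked example of the first-word special case above (not from the
 * original source): with SIGHUP (1) and SIGSEGV (11) both pending and
 * unblocked, plain ffz() ordering would return SIGHUP first, but because
 * SIGSEGV is in SYNCHRONOUS_MASK the word is narrowed to the synchronous
 * subset and next_signal() returns SIGSEGV, so a fault-generated signal
 * is serviced before ordinary asynchronous ones.
 */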

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
	       current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask on @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;
	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		sig->flags = SIGNAL_STOP_STOPPED;
		return true;
	}
	return false;
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */
void flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

/*
 * Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */
void unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
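
/*
 * Illustrative pairing sketch (an assumption about callers, not part of
 * this file): a driver brackets a critical section as
 *
 *	block_all_signals(my_notifier, my_data, &my_mask);
 *	...critical section...
 *	unblock_all_signals();
 *
 * where my_notifier() returning 0 keeps a signal in my_mask blocked.
 */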

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}
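
/*
 * Illustrative caller sketch for dequeue_signal() below (pattern assumed
 * from the signal-delivery loop, not part of this file):
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */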

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path also
		 * reduces the timer noise on heavily loaded !highres
		 * systems.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
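
/*
 * Note (assumption based on the <linux/sched.h> of this era, not part of
 * this file): the signal_wake_up(t, resume) wrapper expands to
 * signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0), while
 * ptrace_signal_wake_up() passes __TASK_TRACED so a resumed tracee is
 * also woken out of TASK_TRACED.
 */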

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid)  ||
	    uid_eq(cred->uid,  tcred->suid) ||
	    uid_eq(cred->uid,  tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event.  @t must have been seized by
 * the ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (signal->flags & SIGNAL_GROUP_COREDUMP)
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
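
/*
 * Overview of the generation path implemented below: a sender ends up in
 * __send_signal(), which runs prepare_signal() for the stop/continue side
 * effects, queues the siginfo on the private or shared pending list, and
 * finally calls complete_signal() above to pick and wake a thread that
 * will dequeue the signal.
 */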

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk(KERN_INFO "code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk(KERN_CONT "%02x ", insn);
		}
	}
	printk(KERN_CONT "\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}

		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
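
/*
 * Illustrative usage (the same pattern as do_send_sig_info() above): the
 * lock_task_sighand() wrapper pairs with unlock_task_sighand() and fails
 * only when ->sighand is already gone:
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		...p->sighand and ->siglock are stable here...
 *		unlock_task_sighand(p, &flags);
 *	}
 */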

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
		return 0;
	return 1;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;

		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
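
/*
 * Illustrative senders (assumed typical call sites, not from this file):
 * send_sig(SIGHUP, tsk, 1) queues a kernel-private (SEND_SIG_PRIV)
 * signal, while architecture fault handlers use force_sig(SIGSEGV,
 * current) so the signal cannot stay blocked or ignored.
 */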

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
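
/*
 * Illustrative sketch (an assumption about the posix-timers code, not
 * part of this file): the timer preallocates its queue entry once at
 * creation and reuses it on every expiry, so delivery cannot fail with
 * -EAGAIN later:
 *
 *	new_timer->sigq = sigqueue_alloc();	(timer_create() path)
 *	...
 *	send_sigqueue(timr->sigq, task, shared);	(expiry path)
 */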
1da177e4c
|
1534 |
/* |
1da177e4c
|
1535 1536 |
* Let a parent know about the death of a child. * For a stopped/continued status change, use do_notify_parent_cldstop instead. |
2b2a1ff64
|
1537 |
* |
53c8f9f19
|
1538 1539 |
* Returns true if our parent ignored us and so we've switched to * self-reaping. |
1da177e4c
|
1540 |
*/ |
53c8f9f19
|
1541 |
bool do_notify_parent(struct task_struct *tsk, int sig) |
1da177e4c
|
1542 1543 1544 1545 |
{ struct siginfo info; unsigned long flags; struct sighand_struct *psig; |
53c8f9f19
|
1546 |
bool autoreap = false; |
6fac4829c
|
1547 |
cputime_t utime, stime; |
1da177e4c
|
1548 1549 1550 1551 |
BUG_ON(sig == -1); /* do_notify_parent_cldstop should have been called instead. */ |
e1abb39c6
|
1552 |
BUG_ON(task_is_stopped_or_traced(tsk)); |
1da177e4c
|
1553 |
|
d21142ece
|
1554 |
BUG_ON(!tsk->ptrace && |
1da177e4c
|
1555 |
(tsk->group_leader != tsk || !thread_group_empty(tsk))); |
b6e238dce
|
1556 1557 1558 1559 1560 1561 1562 1563 |
if (sig != SIGCHLD) { /* * This is only possible if parent == real_parent. * Check if it has changed security domain. */ if (tsk->parent_exec_id != tsk->parent->self_exec_id) sig = SIGCHLD; } |
1da177e4c
|
1564 1565 |
info.si_signo = sig; info.si_errno = 0; |
b488893a3
|
1566 |
/* |
320845048
|
1567 1568 |
* We are under tasklist_lock here so our parent is tied to * us and cannot change. |
b488893a3
|
1569 |
* |
320845048
|
1570 1571 |
* task_active_pid_ns will always return the same pid namespace * until a task passes through release_task. |
b488893a3
|
1572 1573 1574 1575 1576 1577 |
* * write_lock() currently calls preempt_disable() which is the * same as rcu_read_lock(), but according to Oleg, this is not * correct to rely on this */ rcu_read_lock(); |
320845048
|
1578 |
info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent)); |
54ba47eda
|
1579 1580 |
info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns), task_uid(tsk)); |
b488893a3
|
1581 |
rcu_read_unlock(); |
6fac4829c
|
1582 1583 1584 |
task_cputime(tsk, &utime, &stime); info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime); info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime); |
1da177e4c
|
1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 |
info.si_status = tsk->exit_code & 0x7f; if (tsk->exit_code & 0x80) info.si_code = CLD_DUMPED; else if (tsk->exit_code & 0x7f) info.si_code = CLD_KILLED; else { info.si_code = CLD_EXITED; info.si_status = tsk->exit_code >> 8; } psig = tsk->parent->sighand; spin_lock_irqsave(&psig->siglock, flags); |
d21142ece
|
1598 |
if (!tsk->ptrace && sig == SIGCHLD && |
1da177e4c
|
1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 |
(psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { /* * We are exiting and our parent doesn't care. POSIX.1 * defines special semantics for setting SIGCHLD to SIG_IGN * or setting the SA_NOCLDWAIT flag: we should be reaped * automatically and not left for our parent's wait4 call. * Rather than having the parent do it as a magic kind of * signal handler, we just set this to tell do_exit that we * can be cleaned up without becoming a zombie. Note that * we still call __wake_up_parent in this case, because a * blocked sys_wait4 might now return -ECHILD. * * Whether we send SIGCHLD or not for SA_NOCLDWAIT * is implementation-defined: we do (if you don't want * it, just use SIG_IGN instead). */ |
53c8f9f19
|
1616 |
autoreap = true; |
1da177e4c
|
1617 |
if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) |
53c8f9f19
|
1618 |
sig = 0; |
1da177e4c
|
1619 |
} |
53c8f9f19
|
1620 |
if (valid_signal(sig) && sig) |
1da177e4c
|
1621 1622 1623 |
__group_send_sig_info(sig, &info, tsk->parent); __wake_up_parent(tsk, tsk->parent); spin_unlock_irqrestore(&psig->siglock, flags); |
2b2a1ff64
|
1624 |
|
53c8f9f19
|
1625 |
return autoreap; |
1da177e4c
|
1626 |
} |
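The SIG_IGN/SA_NOCLDWAIT auto-reap semantics described in the comment above are visible from userspace. A minimal sketch (error handling elided; this illustrates the POSIX behavior, it is not kernel code):

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	/* Ask the kernel to auto-reap our children: no zombies remain. */
	signal(SIGCHLD, SIG_IGN);

	if (fork() == 0)
		_exit(0);		/* child exits immediately */
	sleep(1);			/* give it time to die */

	/* The child was reaped automatically, so there is nothing to
	 * collect; a blocked wait() is still woken (__wake_up_parent)
	 * and fails with ECHILD. */
	if (wait(NULL) < 0)
		perror("wait");		/* expected: ECHILD */
	return 0;
}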
75b95953a
|
1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 |
/** * do_notify_parent_cldstop - notify parent of stopped/continued state change * @tsk: task reporting the state change * @for_ptracer: the notification is for the ptracer * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report * * Notify @tsk's parent that the stopped/continued state has changed. If * @for_ptracer is %false, @tsk's group leader notifies its real parent. * If %true, @tsk reports to @tsk->parent which should be the ptracer. * * CONTEXT: * Must be called with tasklist_lock at least read locked. */ static void do_notify_parent_cldstop(struct task_struct *tsk, bool for_ptracer, int why) |
1da177e4c
|
1642 1643 1644 |
{ struct siginfo info; unsigned long flags; |
bc505a478
|
1645 |
struct task_struct *parent; |
1da177e4c
|
1646 |
struct sighand_struct *sighand; |
6fac4829c
|
1647 |
cputime_t utime, stime; |
1da177e4c
|
1648 |
|
75b95953a
|
1649 |
if (for_ptracer) { |
bc505a478
|
1650 |
parent = tsk->parent; |
75b95953a
|
1651 |
} else { |
bc505a478
|
1652 1653 1654 |
tsk = tsk->group_leader; parent = tsk->real_parent; } |
1da177e4c
|
1655 1656 |
info.si_signo = SIGCHLD; info.si_errno = 0; |
b488893a3
|
1657 |
/* |
5aba085ed
|
1658 |
* see comment in do_notify_parent() about the following 4 lines |
b488893a3
|
1659 1660 |
*/ rcu_read_lock(); |
17cf22c33
|
1661 |
info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent)); |
54ba47eda
|
1662 |
info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk)); |
b488893a3
|
1663 |
rcu_read_unlock(); |
6fac4829c
|
1664 1665 1666 |
task_cputime(tsk, &utime, &stime); info.si_utime = cputime_to_clock_t(utime); info.si_stime = cputime_to_clock_t(stime); |
1da177e4c
|
1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 |
info.si_code = why; switch (why) { case CLD_CONTINUED: info.si_status = SIGCONT; break; case CLD_STOPPED: info.si_status = tsk->signal->group_exit_code & 0x7f; break; case CLD_TRAPPED: info.si_status = tsk->exit_code & 0x7f; break; default: BUG(); } sighand = parent->sighand; spin_lock_irqsave(&sighand->siglock, flags); if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) __group_send_sig_info(SIGCHLD, &info, parent); /* * Even if SIGCHLD is not generated, we must wake up wait4 calls. */ __wake_up_parent(tsk, parent); spin_unlock_irqrestore(&sighand->siglock, flags); } |
d5f70c00a
|
1694 1695 |
static inline int may_ptrace_stop(void) { |
d21142ece
|
1696 |
if (!likely(current->ptrace)) |
d5f70c00a
|
1697 |
return 0; |
d5f70c00a
|
1698 1699 1700 1701 1702 1703 |
/* * Are we in the middle of do_coredump? * If so, and our tracer is also part of the coredump, stopping * is a deadlock situation, and pointless because our tracer * is dead, so don't allow us to stop. * If SIGKILL was already sent before the caller unlocked |
999d9fc16
|
1704 |
* ->siglock we must see ->core_state != NULL. Otherwise it |
d5f70c00a
|
1705 |
* is safe to enter schedule(). |
9899d11f6
|
1706 1707 1708 1709 |
* * This is almost outdated: a task with a pending SIGKILL can't * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported * after SIGKILL was already dequeued. |
d5f70c00a
|
1710 |
*/ |
999d9fc16
|
1711 |
if (unlikely(current->mm->core_state) && |
d5f70c00a
|
1712 1713 1714 1715 1716 |
unlikely(current->mm == current->parent->mm)) return 0; return 1; } |
1da177e4c
|
1717 |
/* |
5aba085ed
|
1718 |
* Return non-zero if there is a SIGKILL that should be waking us up. |
1a669c2f1
|
1719 1720 1721 1722 |
* Called with the siglock held. */ static int sigkill_pending(struct task_struct *tsk) { |
3d749b9e6
|
1723 1724 |
return sigismember(&tsk->pending.signal, SIGKILL) || sigismember(&tsk->signal->shared_pending.signal, SIGKILL); |
1a669c2f1
|
1725 1726 1727 |
} /* |
1da177e4c
|
1728 1729 1730 1731 1732 1733 1734 |
* This must be called with current->sighand->siglock held. * * This should be the path for all ptrace stops. * We always set current->last_siginfo while stopped here. * That makes it a way to test a stopped process for * being ptrace-stopped vs being job-control-stopped. * |
20686a309
|
1735 1736 |
* If we actually decide not to stop at all because the tracer * is gone, we keep current->exit_code unless clear_code. |
1da177e4c
|
1737 |
*/ |
fe1bc6a09
|
1738 |
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) |
b84011508
|
1739 1740 |
__releases(¤t->sighand->siglock) __acquires(¤t->sighand->siglock) |
1da177e4c
|
1741 |
{ |
ceb6bd67f
|
1742 |
bool gstop_done = false; |
1a669c2f1
|
1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 |
if (arch_ptrace_stop_needed(exit_code, info)) { /* * The arch code has something special to do before a * ptrace stop. This is allowed to block, e.g. for faults * on user stack pages. We can't keep the siglock while * calling arch_ptrace_stop, so we must release it now. * To preserve proper semantics, we must do this before * any signal bookkeeping like checking group_stop_count. * Meanwhile, a SIGKILL could come in before we retake the * siglock. That must prevent us from sleeping in TASK_TRACED. * So after regaining the lock, we must check for SIGKILL. */ spin_unlock_irq(¤t->sighand->siglock); arch_ptrace_stop(exit_code, info); spin_lock_irq(¤t->sighand->siglock); |
3d749b9e6
|
1758 1759 |
if (sigkill_pending(current)) return; |
1a669c2f1
|
1760 |
} |
1da177e4c
|
1761 |
/* |
81be24b8c
|
1762 1763 1764 1765 1766 |
* We're committing to trapping. TRACED should be visible before * TRAPPING is cleared; otherwise, the tracer might fail do_wait(). * Also, transition to TRACED and updates to ->jobctl should be * atomic with respect to siglock and should be done after the arch * hook as siglock is released and regrabbed across it. |
1da177e4c
|
1767 |
*/ |
81be24b8c
|
1768 |
set_current_state(TASK_TRACED); |
1da177e4c
|
1769 1770 1771 |
current->last_siginfo = info; current->exit_code = exit_code; |
d79fdd6d9
|
1772 |
/* |
0ae8ce1c8
|
1773 1774 |
* If @why is CLD_STOPPED, we're trapping to participate in a group * stop. Do the bookkeeping. Note that if SIGCONT was delivered |
73ddff2be
|
1775 1776 1777 |
* across siglock relocks since INTERRUPT was scheduled, PENDING * could be clear now. We act as if SIGCONT is received after * TASK_TRACED is entered - ignore it. |
d79fdd6d9
|
1778 |
*/ |
a8f072c1d
|
1779 |
if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING)) |
ceb6bd67f
|
1780 |
gstop_done = task_participate_group_stop(current); |
d79fdd6d9
|
1781 |
|
fb1d910c1
|
1782 |
/* any trap clears pending STOP trap, STOP trap clears NOTIFY */ |
73ddff2be
|
1783 |
task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP); |
fb1d910c1
|
1784 1785 |
if (info && info->si_code >> 8 == PTRACE_EVENT_STOP) task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY); |
73ddff2be
|
1786 |
|
81be24b8c
|
1787 |
/* entering a trap, clear TRAPPING */ |
a8f072c1d
|
1788 |
task_clear_jobctl_trapping(current); |
d79fdd6d9
|
1789 |
|
1da177e4c
|
1790 1791 |
spin_unlock_irq(¤t->sighand->siglock); read_lock(&tasklist_lock); |
3d749b9e6
|
1792 |
if (may_ptrace_stop()) { |
ceb6bd67f
|
1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 |
/* * Notify parents of the stop. * * While ptraced, there are two parents - the ptracer and * the real_parent of the group_leader. The ptracer should * know about every stop while the real parent is only * interested in the completion of group stop. The states * for the two don't interact with each other. Notify * separately unless they're gonna be duplicates. */ do_notify_parent_cldstop(current, true, why); |
bb3696da8
|
1804 |
if (gstop_done && ptrace_reparented(current)) |
ceb6bd67f
|
1805 |
do_notify_parent_cldstop(current, false, why); |
53da1d945
|
1806 1807 1808 1809 1810 1811 1812 |
/* * Don't want to allow preemption here, because * sys_ptrace() needs this task to be inactive. * * XXX: implement read_unlock_no_resched(). */ preempt_disable(); |
1da177e4c
|
1813 |
read_unlock(&tasklist_lock); |
53da1d945
|
1814 |
preempt_enable_no_resched(); |
5d8f72b55
|
1815 |
freezable_schedule(); |
1da177e4c
|
1816 1817 1818 |
} else { /* * By the time we got the lock, our tracer went away. |
6405f7f46
|
1819 |
* Don't drop the lock yet, another tracer may come. |
ceb6bd67f
|
1820 1821 1822 |
* * If @gstop_done, the ptracer went away between group stop * completion and here. During detach, it would have set |
a8f072c1d
|
1823 1824 1825 |
* JOBCTL_STOP_PENDING on us and we'll re-enter * TASK_STOPPED in do_signal_stop() on return, so notifying * the real parent of the group stop completion is enough. |
1da177e4c
|
1826 |
*/ |
ceb6bd67f
|
1827 1828 |
if (gstop_done) do_notify_parent_cldstop(current, false, why); |
9899d11f6
|
1829 |
/* tasklist protects us from ptrace_freeze_traced() */ |
6405f7f46
|
1830 |
__set_current_state(TASK_RUNNING); |
20686a309
|
1831 1832 |
if (clear_code) current->exit_code = 0; |
6405f7f46
|
1833 |
read_unlock(&tasklist_lock); |
1da177e4c
|
1834 1835 1836 1837 1838 1839 1840 1841 1842 |
} /* * We are back. Now reacquire the siglock before touching * last_siginfo, so that we are sure to have synchronized with * any signal-sending on another CPU that wants to examine it. */ spin_lock_irq(¤t->sighand->siglock); current->last_siginfo = NULL; |
544b2c91a
|
1843 1844 |
/* LISTENING can be set only during STOP traps, clear it */ current->jobctl &= ~JOBCTL_LISTENING; |
1da177e4c
|
1845 1846 1847 |
/* * Queued signals ignored us while we were stopped for tracing. * So check for any that we should take before resuming user mode. |
b74d0deb9
|
1848 |
* This sets TIF_SIGPENDING, but never clears it. |
1da177e4c
|
1849 |
*/ |
b74d0deb9
|
1850 |
recalc_sigpending_tsk(current); |
1da177e4c
|
1851 |
} |
3544d72a0
|
1852 |
static void ptrace_do_notify(int signr, int exit_code, int why) |
1da177e4c
|
1853 1854 |
{ siginfo_t info; |
1da177e4c
|
1855 |
memset(&info, 0, sizeof info); |
3544d72a0
|
1856 |
info.si_signo = signr; |
1da177e4c
|
1857 |
info.si_code = exit_code; |
b488893a3
|
1858 |
info.si_pid = task_pid_vnr(current); |
078de5f70
|
1859 |
info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
1da177e4c
|
1860 1861 |
/* Let the debugger run. */ |
3544d72a0
|
1862 1863 1864 1865 1866 1867 |
ptrace_stop(exit_code, why, 1, &info); } void ptrace_notify(int exit_code) { BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); |
f784e8a79
|
1868 1869 |
if (unlikely(current->task_works)) task_work_run(); |
3544d72a0
|
1870 |
|
1da177e4c
|
1871 |
spin_lock_irq(¤t->sighand->siglock); |
3544d72a0
|
1872 |
ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED); |
1da177e4c
|
1873 1874 |
spin_unlock_irq(¤t->sighand->siglock); } |
73ddff2be
|
1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 |
/** * do_signal_stop - handle group stop for SIGSTOP and other stop signals * @signr: signr causing group stop if initiating * * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr * and participate in it. If already set, participate in the existing * group stop. If participated in a group stop (and thus slept), %true is * returned with siglock released. * * If ptraced, this function doesn't handle stop itself. Instead, * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock * untouched. The caller must ensure that INTERRUPT trap handling takes * place afterwards. * * CONTEXT: * Must be called with @current->sighand->siglock held, which is released * on %true return. * * RETURNS: * %false if group stop is already cancelled or ptrace trap is scheduled. * %true if participated in group stop. |
1da177e4c
|
1896 |
*/ |
73ddff2be
|
1897 1898 |
static bool do_signal_stop(int signr) __releases(¤t->sighand->siglock) |
1da177e4c
|
1899 1900 |
{ struct signal_struct *sig = current->signal; |
1da177e4c
|
1901 |
|
a8f072c1d
|
1902 1903 |
if (!(current->jobctl & JOBCTL_STOP_PENDING)) { unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME; |
f558b7e40
|
1904 |
struct task_struct *t; |
a8f072c1d
|
1905 1906 |
/* signr will be recorded in task->jobctl for retries */ WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK); |
d79fdd6d9
|
1907 |
|
a8f072c1d
|
1908 |
if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) || |
573cf9ad7
|
1909 |
unlikely(signal_group_exit(sig))) |
73ddff2be
|
1910 |
return false; |
1da177e4c
|
1911 |
/* |
408a37de6
|
1912 1913 1914 1915 1916 1917 1918 |
* There is no group stop already in progress. We must * initiate one now. * * While ptraced, a task may be resumed while group stop is * still in effect and then receive a stop signal and * initiate another group stop. This deviates from the * usual behavior as two consecutive stop signals can't |
780006eac
|
1919 1920 |
* cause two group stops when !ptraced. That is why we * also check !task_is_stopped(t) below. |
408a37de6
|
1921 1922 1923 1924 1925 1926 1927 1928 |
* * The condition can be distinguished by testing whether * SIGNAL_STOP_STOPPED is already set. Don't generate * group_exit_code in such case. * * This is not necessary for SIGNAL_STOP_CONTINUED because * an intervening stop signal is required to cause two * continued events regardless of ptrace. |
1da177e4c
|
1929 |
*/ |
408a37de6
|
1930 1931 |
if (!(sig->flags & SIGNAL_STOP_STOPPED)) sig->group_exit_code = signr; |
1da177e4c
|
1932 |
|
7dd3db54e
|
1933 1934 1935 1936 |
sig->group_stop_count = 0; if (task_set_jobctl_pending(current, signr | gstop)) sig->group_stop_count++; |
1da177e4c
|
1937 |
|
8d38f203b
|
1938 1939 |
t = current; while_each_thread(current, t) { |
1da177e4c
|
1940 |
/* |
a122b341b
|
1941 1942 1943 |
* Setting state to TASK_STOPPED for a group * stop is always done with the siglock held, * so this check has no races. |
1da177e4c
|
1944 |
*/ |
7dd3db54e
|
1945 1946 |
if (!task_is_stopped(t) && task_set_jobctl_pending(t, signr | gstop)) { |
ae6d2ed7b
|
1947 |
sig->group_stop_count++; |
fb1d910c1
|
1948 1949 1950 1951 |
if (likely(!(t->ptrace & PT_SEIZED))) signal_wake_up(t, 0); else ptrace_trap_notify(t); |
a122b341b
|
1952 |
} |
d79fdd6d9
|
1953 |
} |
1da177e4c
|
1954 |
} |
73ddff2be
|
1955 |
|
d21142ece
|
1956 |
if (likely(!current->ptrace)) { |
5224fa366
|
1957 |
int notify = 0; |
1da177e4c
|
1958 |
|
5224fa366
|
1959 1960 1961 1962 1963 1964 1965 |
/* * If there are no other threads in the group, or if there * is a group stop in progress and we are the last to stop, * report to the parent. */ if (task_participate_group_stop(current)) notify = CLD_STOPPED; |
ae6d2ed7b
|
1966 |
__set_current_state(TASK_STOPPED); |
5224fa366
|
1967 |
spin_unlock_irq(¤t->sighand->siglock); |
62bcf9d99
|
1968 1969 1970 1971 1972 1973 1974 1975 1976 |
/* * Notify the parent of the group stop completion. Because * we're not holding either the siglock or tasklist_lock * here, a ptracer may attach in between; however, this is for * group stop and should always be delivered to the real * parent of the group leader. The new ptracer will get * its notification when this task transitions into * TASK_TRACED. */ |
5224fa366
|
1977 1978 |
if (notify) { read_lock(&tasklist_lock); |
62bcf9d99
|
1979 |
do_notify_parent_cldstop(current, false, notify); |
5224fa366
|
1980 1981 1982 1983 |
read_unlock(&tasklist_lock); } /* Now we don't run again until woken by SIGCONT or SIGKILL */ |
5d8f72b55
|
1984 |
freezable_schedule(); |
73ddff2be
|
1985 |
return true; |
d79fdd6d9
|
1986 |
} else { |
73ddff2be
|
1987 1988 1989 1990 1991 1992 |
/* * While ptraced, group stop is handled by STOP trap. * Schedule it and let the caller deal with it. */ task_set_jobctl_pending(current, JOBCTL_TRAP_STOP); return false; |
ae6d2ed7b
|
1993 |
} |
73ddff2be
|
1994 |
} |
1da177e4c
|
1995 |
|
73ddff2be
|
1996 1997 1998 |
/** * do_jobctl_trap - take care of ptrace jobctl traps * |
3544d72a0
|
1999 2000 2001 2002 2003 2004 2005 |
* When PT_SEIZED, it's used for both group stop and explicit * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with * accompanying siginfo. If stopped, lower eight bits of exit_code contain * the stop signal; otherwise, %SIGTRAP. * * When !PT_SEIZED, it's used only for group stop trap with stop signal * number as exit_code and no siginfo. |
73ddff2be
|
2006 2007 2008 2009 2010 2011 2012 |
* * CONTEXT: * Must be called with @current->sighand->siglock held, which may be * released and re-acquired before returning with intervening sleep. */ static void do_jobctl_trap(void) { |
3544d72a0
|
2013 |
struct signal_struct *signal = current->signal; |
73ddff2be
|
2014 |
int signr = current->jobctl & JOBCTL_STOP_SIGMASK; |
ae6d2ed7b
|
2015 |
|
3544d72a0
|
2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 |
if (current->ptrace & PT_SEIZED) { if (!signal->group_stop_count && !(signal->flags & SIGNAL_STOP_STOPPED)) signr = SIGTRAP; WARN_ON_ONCE(!signr); ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8), CLD_STOPPED); } else { WARN_ON_ONCE(!signr); ptrace_stop(signr, CLD_STOPPED, 0, NULL); current->exit_code = 0; |
ae6d2ed7b
|
2027 |
} |
1da177e4c
|
2028 |
} |
94eb22d50
|
2029 |
static int ptrace_signal(int signr, siginfo_t *info) |
18c98b652
|
2030 |
{ |
b7f9591c4
|
2031 |
ptrace_signal_deliver(); |
8a3524180
|
2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 |
/* * We do not check sig_kernel_stop(signr) but set this marker * unconditionally because we do not know whether debugger will * change signr. This flag has no meaning unless we are going * to stop after return from ptrace_stop(). In this case it will * be checked in do_signal_stop(), we should only stop if it was * not cleared by SIGCONT while we were sleeping. See also the * comment in dequeue_signal(). */ current->jobctl |= JOBCTL_STOP_DEQUEUED; |
fe1bc6a09
|
2042 |
ptrace_stop(signr, CLD_TRAPPED, 0, info); |
18c98b652
|
2043 2044 2045 2046 2047 2048 2049 |
/* We're back. Did the debugger cancel the sig? */ signr = current->exit_code; if (signr == 0) return signr; current->exit_code = 0; |
5aba085ed
|
2050 2051 2052 2053 2054 2055 |
/* * Update the siginfo structure if the signal has * changed. If the debugger wanted something * specific in the siginfo structure then it should * have updated *info via PTRACE_SETSIGINFO. */ |
18c98b652
|
2056 2057 2058 2059 |
if (signr != info->si_signo) { info->si_signo = signr; info->si_errno = 0; info->si_code = SI_USER; |
6b550f949
|
2060 |
rcu_read_lock(); |
18c98b652
|
2061 |
info->si_pid = task_pid_vnr(current->parent); |
54ba47eda
|
2062 2063 |
info->si_uid = from_kuid_munged(current_user_ns(), task_uid(current->parent)); |
6b550f949
|
2064 |
rcu_read_unlock(); |
18c98b652
|
2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 |
} /* If the (new) signal is now blocked, requeue it. */ if (sigismember(¤t->blocked, signr)) { specific_send_sig_info(signr, info, current); signr = 0; } return signr; } |
828b1f65d
|
2075 |
int get_signal(struct ksignal *ksig) |
1da177e4c
|
2076 |
{ |
f6b76d4fb
|
2077 2078 2079 |
struct sighand_struct *sighand = current->sighand; struct signal_struct *signal = current->signal; int signr; |
1da177e4c
|
2080 |
|
f784e8a79
|
2081 2082 |
if (unlikely(current->task_works)) task_work_run(); |
726670280
|
2083 |
|
0326f5a94
|
2084 2085 |
if (unlikely(uprobe_deny_signal())) return 0; |
13b1c3d4b
|
2086 |
/* |
5d8f72b55
|
2087 2088 2089 |
* Do this once, we can't return to user-mode if freezing() == T. * do_signal_stop() and ptrace_stop() do freezable_schedule() and * thus do not need another check after return. |
13b1c3d4b
|
2090 |
*/ |
fc558a749
|
2091 |
try_to_freeze(); |
5d8f72b55
|
2092 |
relock: |
f6b76d4fb
|
2093 |
spin_lock_irq(&sighand->siglock); |
021e1ae3d
|
2094 2095 2096 2097 2098 |
/* * Every stopped thread goes here after wakeup. Check to see if * we should notify the parent; prepare_signal(SIGCONT) encodes * the CLD_ si_code into SIGNAL_CLD_MASK bits. */ |
f6b76d4fb
|
2099 |
if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { |
c672af35d
|
2100 2101 2102 2103 2104 2105 |
int why; if (signal->flags & SIGNAL_CLD_CONTINUED) why = CLD_CONTINUED; else why = CLD_STOPPED; |
f6b76d4fb
|
2106 |
signal->flags &= ~SIGNAL_CLD_MASK; |
e44205519
|
2107 |
|
ae6d2ed7b
|
2108 |
spin_unlock_irq(&sighand->siglock); |
fa00b80b3
|
2109 |
|
ceb6bd67f
|
2110 2111 2112 2113 2114 2115 2116 2117 |
/* * Notify the parent that we're continuing. This event is * always per-process and doesn't make a whole lot of sense * for ptracers, who shouldn't consume the state via * wait(2) either, but, for backward compatibility, notify * the ptracer of the group leader too unless it's gonna be * a duplicate. */ |
edf2ed153
|
2118 |
read_lock(&tasklist_lock); |
ceb6bd67f
|
2119 |
do_notify_parent_cldstop(current, false, why); |
bb3696da8
|
2120 2121 2122 |
if (ptrace_reparented(current->group_leader)) do_notify_parent_cldstop(current->group_leader, true, why); |
edf2ed153
|
2123 |
read_unlock(&tasklist_lock); |
ceb6bd67f
|
2124 |
|
e44205519
|
2125 2126 |
goto relock; } |
1da177e4c
|
2127 2128 |
for (;;) { struct k_sigaction *ka; |
1be53963b
|
2129 |
|
dd1d67726
|
2130 2131 |
if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) && do_signal_stop(0)) |
7bcf6a2ca
|
2132 |
goto relock; |
1be53963b
|
2133 |
|
73ddff2be
|
2134 2135 2136 2137 2138 |
if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) { do_jobctl_trap(); spin_unlock_irq(&sighand->siglock); goto relock; } |
1da177e4c
|
2139 |
|
828b1f65d
|
2140 |
signr = dequeue_signal(current, ¤t->blocked, &ksig->info); |
7bcf6a2ca
|
2141 |
|
dd1d67726
|
2142 2143 |
if (!signr) break; /* will return 0 */ |
7bcf6a2ca
|
2144 |
|
8a3524180
|
2145 |
if (unlikely(current->ptrace) && signr != SIGKILL) { |
828b1f65d
|
2146 |
signr = ptrace_signal(signr, &ksig->info); |
dd1d67726
|
2147 2148 |
if (!signr) continue; |
1da177e4c
|
2149 |
} |
dd1d67726
|
2150 |
ka = &sighand->action[signr-1]; |
f9d4257e0
|
2151 |
/* Trace actually delivered signals. */ |
828b1f65d
|
2152 |
trace_signal_deliver(signr, &ksig->info, ka); |
f9d4257e0
|
2153 |
|
1da177e4c
|
2154 2155 2156 2157 |
if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ continue; if (ka->sa.sa_handler != SIG_DFL) { /* Run the handler. */ |
828b1f65d
|
2158 |
ksig->ka = *ka; |
1da177e4c
|
2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 |
if (ka->sa.sa_flags & SA_ONESHOT) ka->sa.sa_handler = SIG_DFL; break; /* will return non-zero "signr" value */ } /* * Now we are doing the default action for this signal. */ if (sig_kernel_ignore(signr)) /* Default is nothing. */ continue; |
84d737866
|
2171 |
/* |
0fbc26a6c
|
2172 |
* Global init gets no signals it doesn't want. |
b3bfa0cba
|
2173 2174 2175 2176 2177 2178 2179 |
* Container-init gets no signals it doesn't want from the same * container. * * Note that if global/container-init sees a sig_kernel_only() * signal here, the signal must have been generated internally * or must have come from an ancestor namespace. In either * case, the signal cannot be dropped. |
84d737866
|
2180 |
*/ |
fae5fa44f
|
2181 |
if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && |
b3bfa0cba
|
2182 |
!sig_kernel_only(signr)) |
1da177e4c
|
2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 |
continue; if (sig_kernel_stop(signr)) { /* * The default action is to stop all threads in * the thread group. The job control signals * do nothing in an orphaned pgrp, but SIGSTOP * always works. Note that siglock needs to be * dropped during the call to is_orphaned_pgrp() * because of lock ordering with tasklist_lock. * This allows an intervening SIGCONT to be posted. * We need to check for that and bail out if necessary. */ if (signr != SIGSTOP) { |
f6b76d4fb
|
2197 |
spin_unlock_irq(&sighand->siglock); |
1da177e4c
|
2198 2199 |
/* signals can be posted during this window */ |
3e7cd6c41
|
2200 |
if (is_current_pgrp_orphaned()) |
1da177e4c
|
2201 |
goto relock; |
f6b76d4fb
|
2202 |
spin_lock_irq(&sighand->siglock); |
1da177e4c
|
2203 |
} |
828b1f65d
|
2204 |
if (likely(do_signal_stop(ksig->info.si_signo))) { |
1da177e4c
|
2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 |
/* It released the siglock. */ goto relock; } /* * We didn't actually stop, due to a race * with SIGCONT or something like that. */ continue; } |
f6b76d4fb
|
2215 |
spin_unlock_irq(&sighand->siglock); |
1da177e4c
|
2216 2217 2218 2219 2220 |
/* * Anything else is fatal, maybe with a core dump. */ current->flags |= PF_SIGNALED; |
2dce81bff
|
2221 |
|
1da177e4c
|
2222 |
if (sig_kernel_coredump(signr)) { |
2dce81bff
|
2223 |
if (print_fatal_signals) |
828b1f65d
|
2224 |
print_fatal_signal(ksig->info.si_signo); |
2b5faa4c5
|
2225 |
proc_coredump_connector(current); |
1da177e4c
|
2226 2227 2228 2229 2230 2231 2232 2233 |
/* * If it was able to dump core, this kills all * other threads in the group and synchronizes with * their demise. If we lost the race with another * thread getting here, it set group_exit_code * first and our do_group_exit call below will use * that value and ignore the one we pass it. */ |
828b1f65d
|
2234 |
do_coredump(&ksig->info); |
1da177e4c
|
2235 2236 2237 2238 2239 |
} /* * Death signals, no core dump. */ |
828b1f65d
|
2240 |
do_group_exit(ksig->info.si_signo); |
1da177e4c
|
2241 2242 |
/* NOTREACHED */ } |
f6b76d4fb
|
2243 |
spin_unlock_irq(&sighand->siglock); |
828b1f65d
|
2244 2245 2246 |
ksig->sig = signr; return ksig->sig > 0; |
1da177e4c
|
2247 |
} |
5e6292c0f
|
2248 |
/** |
efee984c2
|
2249 |
* signal_delivered - called after a signal was successfully delivered |
10b1c7ac8
|
2250 |
* @ksig: kernel signal struct |
efee984c2
|
2251 |
* @stepping: nonzero if debugger single-step or block-step in use |
5e6292c0f
|
2252 |
* |
e227867f1
|
2253 |
* This function should be called when a signal has successfully been |
10b1c7ac8
|
2254 |
* delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask |
efee984c2
|
2255 |
* is always blocked, and the signal itself is blocked unless %SA_NODEFER |
10b1c7ac8
|
2256 |
* is set in @ksig->ka.sa.sa_flags). Tracing is notified. |
5e6292c0f
|
2257 |
*/ |
10b1c7ac8
|
2258 |
static void signal_delivered(struct ksignal *ksig, int stepping) |
5e6292c0f
|
2259 2260 |
{ sigset_t blocked; |
a610d6e67
|
2261 2262 2263 2264 2265 |
/* A signal was successfully delivered, and the saved sigmask was stored on the signal frame, and will be restored by sigreturn. So we can simply clear the restore sigmask flag. */ clear_restore_sigmask(); |
10b1c7ac8
|
2266 2267 2268 |
sigorsets(&blocked, ¤t->blocked, &ksig->ka.sa.sa_mask); if (!(ksig->ka.sa.sa_flags & SA_NODEFER)) sigaddset(&blocked, ksig->sig); |
5e6292c0f
|
2269 |
set_current_blocked(&blocked); |
df5601f9c
|
2270 |
tracehook_signal_handler(stepping); |
5e6292c0f
|
2271 |
} |
2ce5da175
|
2272 2273 2274 2275 2276 |
void signal_setup_done(int failed, struct ksignal *ksig, int stepping) { if (failed) force_sigsegv(ksig->sig, current); else |
10b1c7ac8
|
2277 |
signal_delivered(ksig, stepping); |
2ce5da175
|
2278 |
} |
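The blocking rules signal_delivered() applies are observable through sigaction(2): for the handler's duration the kernel blocks blocked | sa_mask | {sig}, with the last term dropped when SA_NODEFER is set. A minimal userspace sketch (the handler's introspection is illustrative):

#include <signal.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig)
{
	sigset_t cur;

	(void)sig;
	/* While this handler runs, SIGUSR1 (the delivered signal) and
	 * SIGUSR2 (from sa_mask) are both blocked. */
	sigprocmask(SIG_BLOCK, NULL, &cur);
	if (sigismember(&cur, SIGUSR1) && sigismember(&cur, SIGUSR2))
		write(2, "USR1+USR2 blocked in handler\n", 29);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;
	sigemptyset(&sa.sa_mask);
	sigaddset(&sa.sa_mask, SIGUSR2);	/* extra handler-time mask */
	/* no SA_NODEFER: SIGUSR1 itself is blocked during delivery */
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);
	return 0;
}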
0edceb7bc
|
2279 2280 |
/* * It could be that complete_signal() picked us to notify about the |
fec9993db
|
2281 2282 |
* group-wide signal. Other threads should be notified now to take * the shared signals in @which since we will not. |
0edceb7bc
|
2283 |
*/ |
f646e227b
|
2284 |
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) |
0edceb7bc
|
2285 |
{ |
f646e227b
|
2286 |
sigset_t retarget; |
0edceb7bc
|
2287 |
struct task_struct *t; |
f646e227b
|
2288 2289 2290 |
sigandsets(&retarget, &tsk->signal->shared_pending.signal, which); if (sigisemptyset(&retarget)) return; |
0edceb7bc
|
2291 2292 |
t = tsk; while_each_thread(tsk, t) { |
fec9993db
|
2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 |
if (t->flags & PF_EXITING) continue; if (!has_pending_signals(&retarget, &t->blocked)) continue; /* Remove the signals this thread can handle. */ sigandsets(&retarget, &retarget, &t->blocked); if (!signal_pending(t)) signal_wake_up(t, 0); if (sigisemptyset(&retarget)) break; |
0edceb7bc
|
2306 2307 |
} } |
d12619b5f
|
2308 2309 2310 |
void exit_signals(struct task_struct *tsk) { int group_stop = 0; |
f646e227b
|
2311 |
sigset_t unblocked; |
d12619b5f
|
2312 |
|
77e4ef99d
|
2313 2314 2315 2316 2317 |
/* * @tsk is about to have PF_EXITING set - lock out users which * expect stable threadgroup. */ threadgroup_change_begin(tsk); |
5dee1707d
|
2318 2319 |
if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { tsk->flags |= PF_EXITING; |
77e4ef99d
|
2320 |
threadgroup_change_end(tsk); |
5dee1707d
|
2321 |
return; |
d12619b5f
|
2322 |
} |
5dee1707d
|
2323 |
spin_lock_irq(&tsk->sighand->siglock); |
d12619b5f
|
2324 2325 2326 2327 2328 |
/* * From now this task is not visible for group-wide signals, * see wants_signal(), do_signal_stop(). */ tsk->flags |= PF_EXITING; |
77e4ef99d
|
2329 2330 |
threadgroup_change_end(tsk); |
5dee1707d
|
2331 2332 |
if (!signal_pending(tsk)) goto out; |
f646e227b
|
2333 2334 2335 |
unblocked = tsk->blocked; signotset(&unblocked); retarget_shared_pending(tsk, &unblocked); |
5dee1707d
|
2336 |
|
a8f072c1d
|
2337 |
if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) && |
e5c1902e9
|
2338 |
task_participate_group_stop(tsk)) |
edf2ed153
|
2339 |
group_stop = CLD_STOPPED; |
5dee1707d
|
2340 |
out: |
d12619b5f
|
2341 |
spin_unlock_irq(&tsk->sighand->siglock); |
62bcf9d99
|
2342 2343 2344 2345 |
/* * If group stop has completed, deliver the notification. This * should always go to the real parent of the group leader. */ |
ae6d2ed7b
|
2346 |
if (unlikely(group_stop)) { |
d12619b5f
|
2347 |
read_lock(&tasklist_lock); |
62bcf9d99
|
2348 |
do_notify_parent_cldstop(tsk, false, group_stop); |
d12619b5f
|
2349 2350 2351 |
read_unlock(&tasklist_lock); } } |
1da177e4c
|
2352 2353 2354 2355 |
EXPORT_SYMBOL(recalc_sigpending); EXPORT_SYMBOL_GPL(dequeue_signal); EXPORT_SYMBOL(flush_signals); EXPORT_SYMBOL(force_sig); |
1da177e4c
|
2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 |
EXPORT_SYMBOL(send_sig); EXPORT_SYMBOL(send_sig_info); EXPORT_SYMBOL(sigprocmask); EXPORT_SYMBOL(block_all_signals); EXPORT_SYMBOL(unblock_all_signals); /* * System call entry points. */ |
41c57892a
|
2366 2367 2368 |
/** * sys_restart_syscall - restart a system call */ |
754fe8d29
|
2369 |
SYSCALL_DEFINE0(restart_syscall) |
1da177e4c
|
2370 2371 2372 2373 2374 2375 2376 2377 2378 |
{ struct restart_block *restart = ¤t_thread_info()->restart_block; return restart->fn(restart); } long do_no_restart_syscall(struct restart_block *param) { return -EINTR; } |
b182801ab
|
2379 2380 2381 2382 2383 |
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) { if (signal_pending(tsk) && !thread_group_empty(tsk)) { sigset_t newblocked; /* A set of now blocked but previously unblocked signals. */ |
702a5073f
|
2384 |
sigandnsets(&newblocked, newset, ¤t->blocked); |
b182801ab
|
2385 2386 2387 2388 2389 |
retarget_shared_pending(tsk, &newblocked); } tsk->blocked = *newset; recalc_sigpending(); } |
e6fa16ab9
|
2390 2391 2392 2393 2394 2395 |
/** * set_current_blocked - change current->blocked mask * @newset: new mask * * It is wrong to change ->blocked directly; this helper should be used * to ensure the process can't miss a shared signal we are going to block. |
1da177e4c
|
2396 |
*/ |
77097ae50
|
2397 2398 |
void set_current_blocked(sigset_t *newset) { |
77097ae50
|
2399 |
sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP)); |
0c4a84234
|
2400 |
__set_current_blocked(newset); |
77097ae50
|
2401 2402 2403 |
} void __set_current_blocked(const sigset_t *newset) |
e6fa16ab9
|
2404 2405 2406 2407 |
{ struct task_struct *tsk = current; spin_lock_irq(&tsk->sighand->siglock); |
b182801ab
|
2408 |
__set_task_blocked(tsk, newset); |
e6fa16ab9
|
2409 2410 |
spin_unlock_irq(&tsk->sighand->siglock); } |
1da177e4c
|
2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 |
/* * This is also useful for kernel threads that want to temporarily * (or permanently) block certain signals. * * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel * interface happily blocks "unblockable" signals like SIGKILL * and friends. */ int sigprocmask(int how, sigset_t *set, sigset_t *oldset) { |
73ef4aeb6
|
2422 2423 |
struct task_struct *tsk = current; sigset_t newset; |
1da177e4c
|
2424 |
|
73ef4aeb6
|
2425 |
/* Lockless, only current can change ->blocked, never from irq */ |
a26fd335b
|
2426 |
if (oldset) |
73ef4aeb6
|
2427 |
*oldset = tsk->blocked; |
a26fd335b
|
2428 |
|
1da177e4c
|
2429 2430 |
switch (how) { case SIG_BLOCK: |
73ef4aeb6
|
2431 |
sigorsets(&newset, &tsk->blocked, set); |
1da177e4c
|
2432 2433 |
break; case SIG_UNBLOCK: |
702a5073f
|
2434 |
sigandnsets(&newset, &tsk->blocked, set); |
1da177e4c
|
2435 2436 |
break; case SIG_SETMASK: |
73ef4aeb6
|
2437 |
newset = *set; |
1da177e4c
|
2438 2439 |
break; default: |
73ef4aeb6
|
2440 |
return -EINVAL; |
1da177e4c
|
2441 |
} |
a26fd335b
|
2442 |
|
77097ae50
|
2443 |
__set_current_blocked(&newset); |
73ef4aeb6
|
2444 |
return 0; |
1da177e4c
|
2445 |
} |
41c57892a
|
2446 2447 2448 |
/** * sys_rt_sigprocmask - change the list of currently blocked signals * @how: whether to add, remove, or set signals |
ada9c9331
|
2449 |
* @nset: new signal mask if non-null |
41c57892a
|
2450 2451 2452 |
* @oset: previous value of signal mask if non-null * @sigsetsize: size of sigset_t type */ |
bb7efee2c
|
2453 |
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset, |
17da2bd90
|
2454 |
sigset_t __user *, oset, size_t, sigsetsize) |
1da177e4c
|
2455 |
{ |
1da177e4c
|
2456 |
sigset_t old_set, new_set; |
bb7efee2c
|
2457 |
int error; |
1da177e4c
|
2458 2459 2460 |
/* XXX: Don't preclude handling different sized sigset_t's. */ if (sigsetsize != sizeof(sigset_t)) |
bb7efee2c
|
2461 |
return -EINVAL; |
1da177e4c
|
2462 |
|
bb7efee2c
|
2463 2464 2465 2466 2467 |
old_set = current->blocked; if (nset) { if (copy_from_user(&new_set, nset, sizeof(sigset_t))) return -EFAULT; |
1da177e4c
|
2468 |
sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
bb7efee2c
|
2469 |
error = sigprocmask(how, &new_set, NULL); |
1da177e4c
|
2470 |
if (error) |
bb7efee2c
|
2471 2472 |
return error; } |
1da177e4c
|
2473 |
|
bb7efee2c
|
2474 2475 2476 |
if (oset) { if (copy_to_user(oset, &old_set, sizeof(sigset_t))) return -EFAULT; |
1da177e4c
|
2477 |
} |
bb7efee2c
|
2478 2479 |
return 0; |
1da177e4c
|
2480 |
} |
322a56cb1
|
2481 |
#ifdef CONFIG_COMPAT |
322a56cb1
|
2482 2483 |
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset, compat_sigset_t __user *, oset, compat_size_t, sigsetsize) |
1da177e4c
|
2484 |
{ |
322a56cb1
|
2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 |
#ifdef __BIG_ENDIAN sigset_t old_set = current->blocked; /* XXX: Don't preclude handling different sized sigset_t's. */ if (sigsetsize != sizeof(sigset_t)) return -EINVAL; if (nset) { compat_sigset_t new32; sigset_t new_set; int error; if (copy_from_user(&new32, nset, sizeof(compat_sigset_t))) return -EFAULT; sigset_from_compat(&new_set, &new32); sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); error = sigprocmask(how, &new_set, NULL); if (error) return error; } if (oset) { compat_sigset_t old32; sigset_to_compat(&old32, &old_set); |
db61ec29f
|
2509 |
if (copy_to_user(oset, &old32, sizeof(compat_sigset_t))) |
322a56cb1
|
2510 2511 2512 2513 2514 2515 2516 2517 2518 |
return -EFAULT; } return 0; #else return sys_rt_sigprocmask(how, (sigset_t __user *)nset, (sigset_t __user *)oset, sigsetsize); #endif } #endif |
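From userspace this syscall is reached through the glibc sigprocmask() wrapper. A minimal sketch of the block/restore round trip the SIG_BLOCK and SIG_SETMASK cases implement:

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, saved;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);

	/* SIG_BLOCK ORs the set into the blocked mask and saves the
	 * previous one... */
	if (sigprocmask(SIG_BLOCK, &block, &saved) < 0)
		perror("sigprocmask");

	/* ...critical section: SIGINT stays pending instead of being
	 * delivered... */

	/* ...and SIG_SETMASK restores the saved mask verbatim. */
	if (sigprocmask(SIG_SETMASK, &saved, NULL) < 0)
		perror("sigprocmask");
	return 0;
}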
1da177e4c
|
2519 |
|
fe9c1db2c
|
2520 |
static int do_sigpending(void *set, unsigned long sigsetsize) |
1da177e4c
|
2521 |
{ |
1da177e4c
|
2522 |
if (sigsetsize > sizeof(sigset_t)) |
fe9c1db2c
|
2523 |
return -EINVAL; |
1da177e4c
|
2524 2525 |
spin_lock_irq(¤t->sighand->siglock); |
fe9c1db2c
|
2526 |
sigorsets(set, ¤t->pending.signal, |
1da177e4c
|
2527 2528 2529 2530 |
¤t->signal->shared_pending.signal); spin_unlock_irq(¤t->sighand->siglock); /* Outside the lock because only this thread touches it. */ |
fe9c1db2c
|
2531 2532 |
sigandsets(set, ¤t->blocked, set); return 0; |
5aba085ed
|
2533 |
} |
1da177e4c
|
2534 |
|
41c57892a
|
2535 2536 2537 |
/** * sys_rt_sigpending - examine pending signals that have been raised * while blocked |
20f22ab42
|
2538 |
* @uset: stores pending signals |
41c57892a
|
2539 2540 |
* @sigsetsize: size of sigset_t type or larger */ |
fe9c1db2c
|
2541 |
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize) |
1da177e4c
|
2542 |
{ |
fe9c1db2c
|
2543 2544 2545 2546 2547 2548 2549 2550 |
sigset_t set; int err = do_sigpending(&set, sigsetsize); if (!err && copy_to_user(uset, &set, sigsetsize)) err = -EFAULT; return err; } #ifdef CONFIG_COMPAT |
fe9c1db2c
|
2551 2552 |
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset, compat_size_t, sigsetsize) |
1da177e4c
|
2553 |
{ |
fe9c1db2c
|
2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 |
#ifdef __BIG_ENDIAN sigset_t set; int err = do_sigpending(&set, sigsetsize); if (!err) { compat_sigset_t set32; sigset_to_compat(&set32, &set); /* we can get here only if sigsetsize <= sizeof(set) */ if (copy_to_user(uset, &set32, sigsetsize)) err = -EFAULT; } return err; #else return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize); #endif |
1da177e4c
|
2568 |
} |
fe9c1db2c
|
2569 |
#endif |
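The pending-set computation above (private pending ORed with shared pending, then ANDed with ->blocked) is what sigpending(2) reports. A minimal sketch:

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t mask, pending;

	sigemptyset(&mask);
	sigaddset(&mask, SIGUSR1);
	sigprocmask(SIG_BLOCK, &mask, NULL);

	raise(SIGUSR1);			/* queued but not delivered */

	sigpending(&pending);
	printf("SIGUSR1 pending: %d\n", sigismember(&pending, SIGUSR1));
	return 0;
}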
1da177e4c
|
2570 2571 |
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER |
ce3959604
|
2572 |
int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from) |
1da177e4c
|
2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 |
{ int err; if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t))) return -EFAULT; if (from->si_code < 0) return __copy_to_user(to, from, sizeof(siginfo_t)) ? -EFAULT : 0; /* * If you change siginfo_t structure, please be sure * this code is fixed accordingly. |
fba2afaae
|
2584 2585 |
* Please remember to update the signalfd_copyinfo() function * inside fs/signalfd.c too, in case siginfo_t changes. |
1da177e4c
|
2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 |
* It should never copy any pad contained in the structure * to avoid security leaks, but must copy the generic * 3 ints plus the relevant union member. */ err = __put_user(from->si_signo, &to->si_signo); err |= __put_user(from->si_errno, &to->si_errno); err |= __put_user((short)from->si_code, &to->si_code); switch (from->si_code & __SI_MASK) { case __SI_KILL: err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); break; case __SI_TIMER: err |= __put_user(from->si_tid, &to->si_tid); err |= __put_user(from->si_overrun, &to->si_overrun); err |= __put_user(from->si_ptr, &to->si_ptr); break; case __SI_POLL: err |= __put_user(from->si_band, &to->si_band); err |= __put_user(from->si_fd, &to->si_fd); break; case __SI_FAULT: err |= __put_user(from->si_addr, &to->si_addr); #ifdef __ARCH_SI_TRAPNO err |= __put_user(from->si_trapno, &to->si_trapno); #endif |
a337fdac7
|
2612 |
#ifdef BUS_MCEERR_AO |
5aba085ed
|
2613 |
/* |
a337fdac7
|
2614 |
* Other callers might not initialize the si_lsb field, |
5aba085ed
|
2615 |
* so check explicitly for the right codes here. |
a337fdac7
|
2616 2617 2618 2619 |
*/ if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); #endif |
1da177e4c
|
2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 |
break; case __SI_CHLD: err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_status, &to->si_status); err |= __put_user(from->si_utime, &to->si_utime); err |= __put_user(from->si_stime, &to->si_stime); break; case __SI_RT: /* This is not generated by the kernel as of now. */ case __SI_MESGQ: /* But this is */ err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_ptr, &to->si_ptr); break; |
a0727e8ce
|
2634 2635 2636 2637 2638 2639 2640 |
#ifdef __ARCH_SIGSYS case __SI_SYS: err |= __put_user(from->si_call_addr, &to->si_call_addr); err |= __put_user(from->si_syscall, &to->si_syscall); err |= __put_user(from->si_arch, &to->si_arch); break; #endif |
1da177e4c
|
2641 2642 2643 2644 2645 2646 2647 2648 2649 |
default: /* this is just in case for now ... */ err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); break; } return err; } #endif |
41c57892a
|
2650 |
/** |
943df1485
|
2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 |
* do_sigtimedwait - wait for queued signals specified in @which * @which: queued signals to wait for * @info: if non-null, the signal's siginfo is returned here * @ts: upper bound on process time suspension */ int do_sigtimedwait(const sigset_t *which, siginfo_t *info, const struct timespec *ts) { struct task_struct *tsk = current; long timeout = MAX_SCHEDULE_TIMEOUT; sigset_t mask = *which; int sig; if (ts) { if (!timespec_valid(ts)) return -EINVAL; timeout = timespec_to_jiffies(ts); /* * We can be close to the next tick, add another one * to ensure we will wait at least the time asked for. */ if (ts->tv_sec || ts->tv_nsec) timeout++; } /* * Invert the set of allowed signals to get those we want to block. */ sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); signotset(&mask); spin_lock_irq(&tsk->sighand->siglock); sig = dequeue_signal(tsk, &mask, info); if (!sig && timeout) { /* * None ready, temporarily unblock those we're interested in * while we are sleeping, so that we'll be awakened when |
b182801ab
|
2688 2689 |
* they arrive. Unblocking is always fine; we can avoid * set_current_blocked(). |
943df1485
|
2690 2691 2692 2693 2694 |
*/ tsk->real_blocked = tsk->blocked; sigandsets(&tsk->blocked, &tsk->blocked, &mask); recalc_sigpending(); spin_unlock_irq(&tsk->sighand->siglock); |
a2d5f1f5d
|
2695 |
timeout = freezable_schedule_timeout_interruptible(timeout); |
943df1485
|
2696 2697 |
spin_lock_irq(&tsk->sighand->siglock); |
b182801ab
|
2698 |
__set_task_blocked(tsk, &tsk->real_blocked); |
6114041aa
|
2699 |
sigemptyset(&tsk->real_blocked); |
b182801ab
|
2700 |
sig = dequeue_signal(tsk, &mask, info); |
943df1485
|
2701 2702 2703 2704 2705 2706 2707 2708 2709 |
} spin_unlock_irq(&tsk->sighand->siglock); if (sig) return sig; return timeout ? -EINTR : -EAGAIN; } /** |
41c57892a
|
2710 2711 2712 2713 2714 2715 2716 |
* sys_rt_sigtimedwait - synchronously wait for queued signals specified * in @uthese * @uthese: queued signals to wait for * @uinfo: if non-null, the signal's siginfo is returned here * @uts: upper bound on process time suspension * @sigsetsize: size of sigset_t type */ |
17da2bd90
|
2717 2718 2719 |
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, siginfo_t __user *, uinfo, const struct timespec __user *, uts, size_t, sigsetsize) |
1da177e4c
|
2720 |
{ |
1da177e4c
|
2721 2722 2723 |
sigset_t these; struct timespec ts; siginfo_t info; |
943df1485
|
2724 |
int ret; |
1da177e4c
|
2725 2726 2727 2728 2729 2730 2731 |
/* XXX: Don't preclude handling different sized sigset_t's. */ if (sigsetsize != sizeof(sigset_t)) return -EINVAL; if (copy_from_user(&these, uthese, sizeof(these))) return -EFAULT; |
5aba085ed
|
2732 |
|
1da177e4c
|
2733 2734 2735 |
if (uts) { if (copy_from_user(&ts, uts, sizeof(ts))) return -EFAULT; |
1da177e4c
|
2736 |
} |
943df1485
|
2737 |
ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); |
1da177e4c
|
2738 |
|
943df1485
|
2739 2740 2741 |
if (ret > 0 && uinfo) { if (copy_siginfo_to_user(uinfo, &info)) ret = -EFAULT; |
1da177e4c
|
2742 2743 2744 2745 |
} return ret; } |
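Userspace reaches this through the sigtimedwait(3) wrapper; on timeout the -EAGAIN computed above surfaces as errno == EAGAIN. A minimal sketch:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	/* The awaited signals must be blocked, or they may be delivered
	 * to a handler instead of being dequeued here. */
	sigprocmask(SIG_BLOCK, &set, NULL);

	sig = sigtimedwait(&set, &info, &ts);
	if (sig < 0 && errno == EAGAIN)
		printf("timed out\n");
	else if (sig > 0)
		printf("got %d from pid %d\n", sig, (int)info.si_pid);
	return 0;
}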
41c57892a
|
2746 2747 2748 2749 2750 |
/** * sys_kill - send a signal to a process * @pid: the PID of the process * @sig: signal to be sent */ |
17da2bd90
|
2751 |
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) |
1da177e4c
|
2752 2753 2754 2755 2756 2757 |
{ struct siginfo info; info.si_signo = sig; info.si_errno = 0; info.si_code = SI_USER; |
b488893a3
|
2758 |
info.si_pid = task_tgid_vnr(current); |
078de5f70
|
2759 |
info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
1da177e4c
|
2760 2761 2762 |
return kill_something_info(sig, &info, pid); } |
30b4ae8a4
|
2763 2764 |
static int do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) |
1da177e4c
|
2765 |
{ |
1da177e4c
|
2766 |
struct task_struct *p; |
30b4ae8a4
|
2767 |
int error = -ESRCH; |
1da177e4c
|
2768 |
|
3547ff3ae
|
2769 |
rcu_read_lock(); |
228ebcbe6
|
2770 |
p = find_task_by_vpid(pid); |
b488893a3
|
2771 |
if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { |
30b4ae8a4
|
2772 |
error = check_kill_permission(sig, info, p); |
1da177e4c
|
2773 2774 2775 2776 |
/* * The null signal is a permissions and process existence * probe. No signal is actually delivered. */ |
4a30debfb
|
2777 2778 2779 2780 2781 2782 2783 2784 2785 |
if (!error && sig) { error = do_send_sig_info(sig, info, p, false); /* * If lock_task_sighand() failed we pretend the task * dies after receiving the signal. The window is tiny, * and the signal is private anyway. */ if (unlikely(error == -ESRCH)) error = 0; |
1da177e4c
|
2786 2787 |
} } |
3547ff3ae
|
2788 |
rcu_read_unlock(); |
6dd69f106
|
2789 |
|
1da177e4c
|
2790 2791 |
return error; } |
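The null-signal path commented above is the basis of the usual existence probe from userspace: sig == 0 runs only the permission and existence checks and delivers nothing. A minimal sketch (the helper name is illustrative):

#include <errno.h>
#include <signal.h>
#include <stdio.h>

/* Illustrative helper: probe a pid without delivering anything. */
static int process_exists(pid_t pid)
{
	if (kill(pid, 0) == 0)
		return 1;		/* exists and we may signal it */
	return errno == EPERM;		/* exists but isn't ours */
}

int main(void)
{
	printf("pid 1 exists: %d\n", process_exists(1));
	return 0;
}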
30b4ae8a4
|
2792 2793 |
static int do_tkill(pid_t tgid, pid_t pid, int sig) { |
b9e146d8e
|
2794 |
struct siginfo info = {}; |
30b4ae8a4
|
2795 2796 2797 2798 2799 |
info.si_signo = sig; info.si_errno = 0; info.si_code = SI_TKILL; info.si_pid = task_tgid_vnr(current); |
078de5f70
|
2800 |
info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
30b4ae8a4
|
2801 2802 2803 |
return do_send_specific(tgid, pid, sig, &info); } |
6dd69f106
|
2804 2805 2806 2807 2808 2809 |
/** * sys_tgkill - send signal to one specific thread * @tgid: the thread group ID of the thread * @pid: the PID of the thread * @sig: signal to be sent * |
72fd4a35a
|
2810 |
* This syscall also checks the @tgid and returns -ESRCH even if the PID |
6dd69f106
|
2811 2812 2813 |
* exists but no longer belongs to the target process. This * method solves the problem of threads exiting and PIDs getting reused. |
a5f8fa9e9
|
2814 |
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) |
6dd69f106
|
2815 2816 2817 2818 2819 2820 2821 |
{ /* This is only valid for single tasks */ if (pid <= 0 || tgid <= 0) return -EINVAL; return do_tkill(tgid, pid, sig); } |
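Older glibc has no tgkill() wrapper, so the syscall is typically invoked via syscall(2). A sketch sending the null signal to the calling thread (both IDs are checked, as noted above, so a recycled TID in another process can't be hit):

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	pid_t tgid = getpid();
	pid_t tid = (pid_t)syscall(SYS_gettid);	/* == tgid: single-threaded */

	/* sig == 0: permission/existence probe, nothing delivered */
	return (int)syscall(SYS_tgkill, tgid, tid, 0);
}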
41c57892a
|
2822 2823 2824 2825 2826 |
/** * sys_tkill - send signal to one specific task * @pid: the PID of the task * @sig: signal to be sent * |
1da177e4c
|
2827 2828 |
* Send a signal to only one task, even if it's a CLONE_THREAD task. */ |
a5f8fa9e9
|
2829 |
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) |
1da177e4c
|
2830 |
{ |
1da177e4c
|
2831 2832 2833 |
/* This is only valid for single tasks */ if (pid <= 0) return -EINVAL; |
6dd69f106
|
2834 |
return do_tkill(0, pid, sig); |
1da177e4c
|
2835 |
} |
75907d4d7
|
2836 2837 2838 2839 2840 |
static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info) { /* Not even root can pretend to send signals from the kernel. * Nor can they impersonate a kill()/tgkill(), which adds source info. */ |
66dd34ad3
|
2841 2842 |
if ((info->si_code >= 0 || info->si_code == SI_TKILL) && (task_pid_vnr(current) != pid)) { |
75907d4d7
|
2843 2844 2845 2846 2847 2848 2849 2850 2851 |
/* We used to allow any < 0 si_code */ WARN_ON_ONCE(info->si_code < 0); return -EPERM; } info->si_signo = sig; /* POSIX.1b doesn't mention process groups. */ return kill_proc_info(sig, info, pid); } |
41c57892a
|
2852 2853 2854 2855 2856 2857 |
/** * sys_rt_sigqueueinfo - send signal information to a signal * @pid: the PID of the thread * @sig: signal to be sent * @uinfo: signal info to be sent */ |
a5f8fa9e9
|
2858 2859 |
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t __user *, uinfo) |
1da177e4c
|
2860 2861 |
{ siginfo_t info; |
1da177e4c
|
2862 2863 |
if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) return -EFAULT; |
75907d4d7
|
2864 2865 |
return do_rt_sigqueueinfo(pid, sig, &info); } |
1da177e4c
|
2866 |
|
75907d4d7
|
2867 |
#ifdef CONFIG_COMPAT |
75907d4d7
|
2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 |
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, compat_pid_t, pid, int, sig, struct compat_siginfo __user *, uinfo) { siginfo_t info; int ret = copy_siginfo_from_user32(&info, uinfo); if (unlikely(ret)) return ret; return do_rt_sigqueueinfo(pid, sig, &info); |
1da177e4c
|
2878 |
} |
75907d4d7
|
2879 |
#endif |
1da177e4c
|
2880 |
|
9aae8fc05
|
2881 |
static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) |
62ab4505e
|
2882 2883 2884 2885 2886 2887 |
{ /* This is only valid for single tasks */ if (pid <= 0 || tgid <= 0) return -EINVAL; /* Not even root can pretend to send signals from the kernel. |
da48524eb
|
2888 2889 |
* Nor can they impersonate a kill()/tgkill(), which adds source info. */ |
66dd34ad3
|
2890 2891 |
if ((info->si_code >= 0 || info->si_code == SI_TKILL) && (task_pid_vnr(current) != pid)) { |
da48524eb
|
2892 2893 |
/* We used to allow any < 0 si_code */ WARN_ON_ONCE(info->si_code < 0); |
62ab4505e
|
2894 |
return -EPERM; |
da48524eb
|
2895 |
} |
62ab4505e
|
2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 |
info->si_signo = sig; return do_send_specific(tgid, pid, sig, info); } SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, siginfo_t __user *, uinfo) { siginfo_t info; if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) return -EFAULT; return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); } |
9aae8fc05
|
2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 |
#ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, compat_pid_t, tgid, compat_pid_t, pid, int, sig, struct compat_siginfo __user *, uinfo) { siginfo_t info; if (copy_siginfo_from_user32(&info, uinfo)) return -EFAULT; return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); } #endif |
0341729b4
|
2925 |
/* |
b4e74264e
|
2926 |
* For kthreads only, must not be used if cloned with CLONE_SIGHAND |
0341729b4
|
2927 |
*/ |
b4e74264e
|
2928 |
void kernel_sigaction(int sig, __sighandler_t action) |
0341729b4
|
2929 |
{ |
ec5955b8f
|
2930 |
spin_lock_irq(¤t->sighand->siglock); |
b4e74264e
|
2931 2932 2933 |
current->sighand->action[sig - 1].sa.sa_handler = action; if (action == SIG_IGN) { sigset_t mask; |
0341729b4
|
2934 |
|
b4e74264e
|
2935 2936 |
sigemptyset(&mask); sigaddset(&mask, sig); |
580d34e42
|
2937 |
|
b4e74264e
|
2938 2939 2940 2941 |
flush_sigqueue_mask(&mask, ¤t->signal->shared_pending); flush_sigqueue_mask(&mask, ¤t->pending); recalc_sigpending(); } |
0341729b4
|
2942 2943 |
spin_unlock_irq(¤t->sighand->siglock); } |
b4e74264e
|
2944 |
EXPORT_SYMBOL(kernel_sigaction); |
0341729b4
|
2945 |
|
88531f725
|
2946 |
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) |
1da177e4c
|
2947 |
{ |
afe2b0386
|
2948 |
struct task_struct *p = current, *t; |
1da177e4c
|
2949 |
struct k_sigaction *k; |
71fabd5e4
|
2950 |
sigset_t mask; |
1da177e4c
|
2951 |
|
7ed20e1ad
|
2952 |
if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) |
1da177e4c
|
2953 |
return -EINVAL; |
afe2b0386
|
2954 |
k = &p->sighand->action[sig-1]; |
1da177e4c
|
2955 |
|
afe2b0386
|
2956 |
spin_lock_irq(&p->sighand->siglock); |
1da177e4c
|
2957 2958 2959 2960 |
if (oact) *oact = *k; if (act) { |
9ac95f2f9
|
2961 2962 |
sigdelsetmask(&act->sa.sa_mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); |
88531f725
|
2963 |
*k = *act; |
1da177e4c
|
2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 |
/* * POSIX 3.3.1.3: * "Setting a signal action to SIG_IGN for a signal that is * pending shall cause the pending signal to be discarded, * whether or not it is blocked." * * "Setting a signal action to SIG_DFL for a signal that is * pending and whose default action is to ignore the signal * (for example, SIGCHLD), shall cause the pending signal to * be discarded, whether or not it is blocked" */ |
afe2b0386
|
2975 |
if (sig_handler_ignored(sig_handler(p, sig), sig)) { |
71fabd5e4
|
2976 2977 |
sigemptyset(&mask); sigaddset(&mask, sig); |
afe2b0386
|
2978 2979 |
flush_sigqueue_mask(&mask, &p->signal->shared_pending); for_each_thread(p, t) |
c09c14413
|
2980 |
flush_sigqueue_mask(&mask, &t->pending); |
1da177e4c
|
2981 |
} |
1da177e4c
|
2982 |
} |
afe2b0386
|
2983 |
spin_unlock_irq(&p->sighand->siglock); |
1da177e4c
|
2984 2985 |
return 0; } |
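The POSIX 3.3.1.3 rule implemented above is easy to observe: a blocked, pending signal disappears the moment its action becomes SIG_IGN. A minimal sketch:

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t mask, pending;

	sigemptyset(&mask);
	sigaddset(&mask, SIGUSR1);
	sigprocmask(SIG_BLOCK, &mask, NULL);

	raise(SIGUSR1);			/* pending, because blocked */

	signal(SIGUSR1, SIG_IGN);	/* discards the pending instance */

	sigpending(&pending);
	/* prints 0: discarded even though it was blocked */
	printf("still pending: %d\n", sigismember(&pending, SIGUSR1));
	return 0;
}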
c09c14413
|
2986 |
static int |
1da177e4c
|
2987 2988 2989 2990 |
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp) { stack_t oss; int error; |
0083fc2c5
|
2991 2992 2993 |
oss.ss_sp = (void __user *) current->sas_ss_sp; oss.ss_size = current->sas_ss_size; oss.ss_flags = sas_ss_flags(sp); |
1da177e4c
|
2994 2995 2996 2997 2998 2999 3000 |
if (uss) { void __user *ss_sp; size_t ss_size; int ss_flags; error = -EFAULT; |
0dd8486b5
|
3001 3002 3003 3004 3005 3006 |
if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) goto out; error = __get_user(ss_sp, &uss->ss_sp) | __get_user(ss_flags, &uss->ss_flags) | __get_user(ss_size, &uss->ss_size); if (error) |
1da177e4c
|
3007 3008 3009 3010 3011 3012 3013 3014 |
goto out; error = -EPERM; if (on_sig_stack(sp)) goto out; error = -EINVAL; /* |
5aba085ed
|
3015 |
* Note - this code used to test ss_flags incorrectly: |
1da177e4c
|
3016 3017 3018 |
* old code may have been written using ss_flags==0 * to mean ss_flags==SS_ONSTACK (as this was the only * way that worked) - this fix preserves that older |
5aba085ed
|
3019 |
* mechanism. |
1da177e4c
|
3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 |
*/ if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) goto out; if (ss_flags == SS_DISABLE) { ss_size = 0; ss_sp = NULL; } else { error = -ENOMEM; if (ss_size < MINSIGSTKSZ) goto out; } current->sas_ss_sp = (unsigned long) ss_sp; current->sas_ss_size = ss_size; } |
0083fc2c5
|
3036 |
error = 0; |
1da177e4c
|
3037 3038 |
if (uoss) { error = -EFAULT; |
0083fc2c5
|
3039 |
if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) |
1da177e4c
|
3040 |
goto out; |
0083fc2c5
|
3041 3042 3043 |
error = __put_user(oss.ss_sp, &uoss->ss_sp) | __put_user(oss.ss_size, &uoss->ss_size) | __put_user(oss.ss_flags, &uoss->ss_flags); |
1da177e4c
|
3044 |
} |
1da177e4c
|
3045 3046 3047 |
out: return error; } |
6bf9adfc9
|
3048 3049 3050 3051 |
SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss) { return do_sigaltstack(uss, uoss, current_user_stack_pointer()); } |
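Userspace pairs this with SA_ONSTACK so a handler can still run when the normal stack is unusable (e.g. after a stack overflow). A minimal sketch:

#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void on_segv(int sig)
{
	(void)sig;
	/* Running on the alternate stack installed below. */
	write(2, "SIGSEGV on altstack\n", 20);
	_exit(1);
}

int main(void)
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);	/* ss_size must be >= MINSIGSTKSZ */
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	if (sigaltstack(&ss, NULL) < 0)
		return 1;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_segv;
	sa.sa_flags = SA_ONSTACK;	/* run the handler on the alt stack */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	raise(SIGSEGV);			/* handler runs on ss.ss_sp */
	return 0;
}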
1da177e4c
|
3052 |
|
5c49574ff
|
3053 3054 3055 3056 3057 3058 |
int restore_altstack(const stack_t __user *uss) { int err = do_sigaltstack(uss, NULL, current_user_stack_pointer()); /* squash all but EFAULT for now */ return err == -EFAULT ? err : 0; } |
c40702c49
|
3059 3060 3061 3062 3063 3064 3065 |
int __save_altstack(stack_t __user *uss, unsigned long sp) { struct task_struct *t = current; return __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) | __put_user(sas_ss_flags(sp), &uss->ss_flags) | __put_user(t->sas_ss_size, &uss->ss_size); } |
902684395
|
3066 |
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	stack_t uss, uoss;
	int ret;
	mm_segment_t seg;

	if (uss_ptr) {
		compat_stack_t uss32;

		memset(&uss, 0, sizeof(stack_t));
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	seg = get_fs();
	set_fs(KERNEL_DS);
	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
			     (stack_t __force __user *) &uoss,
			     compat_user_stack_pointer());
	set_fs(seg);
	if (ret >= 0 && uoss_ptr)  {
		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
			ret = -EFAULT;
	}
	return ret;
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = compat_sys_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	return  __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
		__put_user(sas_ss_flags(sp), &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
}
#endif

#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @set: where mask of pending signal is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
}

#endif

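/*
 * sys_sigpending() above is simply sys_rt_sigpending() restricted to
 * the size of an old_sigset_t, so legacy callers see only the first
 * word of the pending set.
 */
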
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

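/*
 * Note on the legacy interface above: only word 0 of the blocked mask
 * (the signals representable in an old_sigset_t) is read or written;
 * real-time signals blocked in the higher words are left untouched by
 * all three SIG_* operations.
 */
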
#ifndef CONFIG_ODD_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal to be sent
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	compat_sigset_t mask;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */

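/*
 * The compat path above accumulates get_user()/put_user() results with
 * bitwise OR and converts any nonzero total into a single -EFAULT at
 * the end, which keeps the copy-in/copy-out sequences free of
 * per-field branches.
 */
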
#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif

#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */

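/*
 * Beware: ssetmask() rebuilds the entire blocked set from one word via
 * siginitset(), so real-time signals blocked in the higher words end
 * up unblocked; sgetmask() likewise reports only word 0.
 */
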
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

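/*
 * SA_ONESHOT | SA_NOMASK above is what gives signal() its historical
 * System V semantics: the handler is reset to SIG_DFL on delivery and
 * the signal is not blocked while its own handler runs.
 */
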
#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif

int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

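/*
 * The saved_sigmask/set_restore_sigmask() pairing is what makes
 * sigsuspend() atomic as seen from user space: the temporary mask is in
 * force while we sleep, and signal delivery notices TIF_RESTORE_SIGMASK
 * and reinstates the original mask once the handler's frame has been
 * set up, leaving no window where a signal can arrive under the wrong
 * mask.
 */
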
/**
 *  sys_rt_sigsuspend - replace the signal mask for a value with the
 *	@unewset value until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	return sigsuspend(&newset);
#else
	/* on little-endian bitmaps don't care about granularity */
	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
#endif
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

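/*
 * Weak default; architectures that name special mappings (vdso,
 * vsyscall, ...) override arch_vma_name().
 */
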
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Fail to deliver Signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */