Commit 10c28d937e2cca577c2d804106b50dd0562fb062
Committed by: Al Viro
1 parent: f34f9d186d
Exists in: master and 20 other branches
coredump: move core dump functionality into its own file
This prepares for making core dump functionality optional. The variable
"suid_dumpable" and associated functions are left in fs/exec.c because
they're used elsewhere, such as in ptrace.

Signed-off-by: Alex Kelly <alex.page.kelly@gmail.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Acked-by: Serge Hallyn <serge.hallyn@canonical.com>
Acked-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Showing 4 changed files with 689 additions and 645 deletions (side-by-side diff)
fs/Makefile
... | ... | @@ -11,7 +11,7 @@ |
11 | 11 | attr.o bad_inode.o file.o filesystems.o namespace.o \ |
12 | 12 | seq_file.o xattr.o libfs.o fs-writeback.o \ |
13 | 13 | pnode.o drop_caches.o splice.o sync.o utimes.o \ |
14 | - stack.o fs_struct.o statfs.o | |
14 | + stack.o fs_struct.o statfs.o coredump.o | |
15 | 15 | |
16 | 16 | ifeq ($(CONFIG_BLOCK),y) |
17 | 17 | obj-y += buffer.o bio.o block_dev.o direct-io.o mpage.o ioprio.o |
fs/coredump.c
1 | +#include <linux/slab.h> | |
2 | +#include <linux/file.h> | |
3 | +#include <linux/fdtable.h> | |
4 | +#include <linux/mm.h> | |
5 | +#include <linux/stat.h> | |
6 | +#include <linux/fcntl.h> | |
7 | +#include <linux/swap.h> | |
8 | +#include <linux/string.h> | |
9 | +#include <linux/init.h> | |
10 | +#include <linux/pagemap.h> | |
11 | +#include <linux/perf_event.h> | |
12 | +#include <linux/highmem.h> | |
13 | +#include <linux/spinlock.h> | |
14 | +#include <linux/key.h> | |
15 | +#include <linux/personality.h> | |
16 | +#include <linux/binfmts.h> | |
17 | +#include <linux/utsname.h> | |
18 | +#include <linux/pid_namespace.h> | |
19 | +#include <linux/module.h> | |
20 | +#include <linux/namei.h> | |
21 | +#include <linux/mount.h> | |
22 | +#include <linux/security.h> | |
23 | +#include <linux/syscalls.h> | |
24 | +#include <linux/tsacct_kern.h> | |
25 | +#include <linux/cn_proc.h> | |
26 | +#include <linux/audit.h> | |
27 | +#include <linux/tracehook.h> | |
28 | +#include <linux/kmod.h> | |
29 | +#include <linux/fsnotify.h> | |
30 | +#include <linux/fs_struct.h> | |
31 | +#include <linux/pipe_fs_i.h> | |
32 | +#include <linux/oom.h> | |
33 | +#include <linux/compat.h> | |
34 | + | |
35 | +#include <asm/uaccess.h> | |
36 | +#include <asm/mmu_context.h> | |
37 | +#include <asm/tlb.h> | |
38 | +#include <asm/exec.h> | |
39 | + | |
40 | +#include <trace/events/task.h> | |
41 | +#include "internal.h" | |
42 | + | |
43 | +#include <trace/events/sched.h> | |
44 | + | |
45 | +int core_uses_pid; | |
46 | +char core_pattern[CORENAME_MAX_SIZE] = "core"; | |
47 | +unsigned int core_pipe_limit; | |
48 | + | |
49 | +struct core_name { | |
50 | + char *corename; | |
51 | + int used, size; | |
52 | +}; | |
53 | +static atomic_t call_count = ATOMIC_INIT(1); | |
54 | + | |
55 | +/* The maximal length of core_pattern is also specified in sysctl.c */ | |
56 | + | |
57 | +static int expand_corename(struct core_name *cn) | |
58 | +{ | |
59 | + char *old_corename = cn->corename; | |
60 | + | |
61 | + cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count); | |
62 | + cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL); | |
63 | + | |
64 | + if (!cn->corename) { | |
65 | + kfree(old_corename); | |
66 | + return -ENOMEM; | |
67 | + } | |
68 | + | |
69 | + return 0; | |
70 | +} | |
71 | + | |
72 | +static int cn_printf(struct core_name *cn, const char *fmt, ...) | |
73 | +{ | |
74 | + char *cur; | |
75 | + int need; | |
76 | + int ret; | |
77 | + va_list arg; | |
78 | + | |
79 | + va_start(arg, fmt); | |
80 | + need = vsnprintf(NULL, 0, fmt, arg); | |
81 | + va_end(arg); | |
82 | + | |
83 | + if (likely(need < cn->size - cn->used - 1)) | |
84 | + goto out_printf; | |
85 | + | |
86 | + ret = expand_corename(cn); | |
87 | + if (ret) | |
88 | + goto expand_fail; | |
89 | + | |
90 | +out_printf: | |
91 | + cur = cn->corename + cn->used; | |
92 | + va_start(arg, fmt); | |
93 | + vsnprintf(cur, need + 1, fmt, arg); | |
94 | + va_end(arg); | |
95 | + cn->used += need; | |
96 | + return 0; | |
97 | + | |
98 | +expand_fail: | |
99 | + return ret; | |
100 | +} | |
101 | + | |
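cn_printf() above uses the standard two-pass vsnprintf idiom: the first call with a NULL buffer and size 0 only measures how many bytes the formatted output needs, and the string is actually written only once the buffer is known to be large enough. A minimal userspace sketch of the same idiom (illustrative only; the helper name xasprintf is invented here and is not part of this commit):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Two-pass vsnprintf: measure first, then format into a right-sized buffer. */
static char *xasprintf(const char *fmt, ...)
{
	va_list ap;
	int need;
	char *buf;

	va_start(ap, fmt);
	need = vsnprintf(NULL, 0, fmt, ap);	/* pass 1: size only */
	va_end(ap);
	if (need < 0)
		return NULL;

	buf = malloc(need + 1);			/* +1 for the NUL terminator */
	if (!buf)
		return NULL;

	va_start(ap, fmt);
	vsnprintf(buf, need + 1, fmt, ap);	/* pass 2: real formatting */
	va_end(ap);
	return buf;
}

int main(void)
{
	char *s = xasprintf("core.%d", 1234);
	if (s)
		printf("%s\n", s);
	free(s);
	return 0;
}

The kernel version differs mainly in that expand_corename() grows cn->corename in place with krealloc() rather than allocating a fresh buffer per call.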
102 | +static void cn_escape(char *str) | |
103 | +{ | |
104 | + for (; *str; str++) | |
105 | + if (*str == '/') | |
106 | + *str = '!'; | |
107 | +} | |
108 | + | |
109 | +static int cn_print_exe_file(struct core_name *cn) | |
110 | +{ | |
111 | + struct file *exe_file; | |
112 | + char *pathbuf, *path; | |
113 | + int ret; | |
114 | + | |
115 | + exe_file = get_mm_exe_file(current->mm); | |
116 | + if (!exe_file) { | |
117 | + char *commstart = cn->corename + cn->used; | |
118 | + ret = cn_printf(cn, "%s (path unknown)", current->comm); | |
119 | + cn_escape(commstart); | |
120 | + return ret; | |
121 | + } | |
122 | + | |
123 | + pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY); | |
124 | + if (!pathbuf) { | |
125 | + ret = -ENOMEM; | |
126 | + goto put_exe_file; | |
127 | + } | |
128 | + | |
129 | + path = d_path(&exe_file->f_path, pathbuf, PATH_MAX); | |
130 | + if (IS_ERR(path)) { | |
131 | + ret = PTR_ERR(path); | |
132 | + goto free_buf; | |
133 | + } | |
134 | + | |
135 | + cn_escape(path); | |
136 | + | |
137 | + ret = cn_printf(cn, "%s", path); | |
138 | + | |
139 | +free_buf: | |
140 | + kfree(pathbuf); | |
141 | +put_exe_file: | |
142 | + fput(exe_file); | |
143 | + return ret; | |
144 | +} | |
145 | + | |
146 | +/* format_corename will inspect the pattern parameter, and output a | |
147 | + * name into corename, which must have space for at least | |
148 | + * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator. | |
149 | + */ | |
150 | +static int format_corename(struct core_name *cn, long signr) | |
151 | +{ | |
152 | + const struct cred *cred = current_cred(); | |
153 | + const char *pat_ptr = core_pattern; | |
154 | + int ispipe = (*pat_ptr == '|'); | |
155 | + int pid_in_pattern = 0; | |
156 | + int err = 0; | |
157 | + | |
158 | + cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count); | |
159 | + cn->corename = kmalloc(cn->size, GFP_KERNEL); | |
160 | + cn->used = 0; | |
161 | + | |
162 | + if (!cn->corename) | |
163 | + return -ENOMEM; | |
164 | + | |
165 | + /* Repeat as long as we have more pattern to process and more output | |
166 | + space */ | |
167 | + while (*pat_ptr) { | |
168 | + if (*pat_ptr != '%') { | |
169 | + if (*pat_ptr == 0) | |
170 | + goto out; | |
171 | + err = cn_printf(cn, "%c", *pat_ptr++); | |
172 | + } else { | |
173 | + switch (*++pat_ptr) { | |
174 | + /* single % at the end, drop that */ | |
175 | + case 0: | |
176 | + goto out; | |
177 | + /* Double percent, output one percent */ | |
178 | + case '%': | |
179 | + err = cn_printf(cn, "%c", '%'); | |
180 | + break; | |
181 | + /* pid */ | |
182 | + case 'p': | |
183 | + pid_in_pattern = 1; | |
184 | + err = cn_printf(cn, "%d", | |
185 | + task_tgid_vnr(current)); | |
186 | + break; | |
187 | + /* uid */ | |
188 | + case 'u': | |
189 | + err = cn_printf(cn, "%d", cred->uid); | |
190 | + break; | |
191 | + /* gid */ | |
192 | + case 'g': | |
193 | + err = cn_printf(cn, "%d", cred->gid); | |
194 | + break; | |
195 | + /* signal that caused the coredump */ | |
196 | + case 's': | |
197 | + err = cn_printf(cn, "%ld", signr); | |
198 | + break; | |
199 | + /* UNIX time of coredump */ | |
200 | + case 't': { | |
201 | + struct timeval tv; | |
202 | + do_gettimeofday(&tv); | |
203 | + err = cn_printf(cn, "%lu", tv.tv_sec); | |
204 | + break; | |
205 | + } | |
206 | + /* hostname */ | |
207 | + case 'h': { | |
208 | + char *namestart = cn->corename + cn->used; | |
209 | + down_read(&uts_sem); | |
210 | + err = cn_printf(cn, "%s", | |
211 | + utsname()->nodename); | |
212 | + up_read(&uts_sem); | |
213 | + cn_escape(namestart); | |
214 | + break; | |
215 | + } | |
216 | + /* executable */ | |
217 | + case 'e': { | |
218 | + char *commstart = cn->corename + cn->used; | |
219 | + err = cn_printf(cn, "%s", current->comm); | |
220 | + cn_escape(commstart); | |
221 | + break; | |
222 | + } | |
223 | + case 'E': | |
224 | + err = cn_print_exe_file(cn); | |
225 | + break; | |
226 | + /* core limit size */ | |
227 | + case 'c': | |
228 | + err = cn_printf(cn, "%lu", | |
229 | + rlimit(RLIMIT_CORE)); | |
230 | + break; | |
231 | + default: | |
232 | + break; | |
233 | + } | |
234 | + ++pat_ptr; | |
235 | + } | |
236 | + | |
237 | + if (err) | |
238 | + return err; | |
239 | + } | |
240 | + | |
241 | + /* Backward compatibility with core_uses_pid: | |
242 | + * | |
243 | + * If core_pattern does not include a %p (as is the default) | |
244 | + * and core_uses_pid is set, then .%pid will be appended to | |
245 | + * the filename. Do not do this for piped commands. */ | |
246 | + if (!ispipe && !pid_in_pattern && core_uses_pid) { | |
247 | + err = cn_printf(cn, ".%d", task_tgid_vnr(current)); | |
248 | + if (err) | |
249 | + return err; | |
250 | + } | |
251 | +out: | |
252 | + return ispipe; | |
253 | +} | |
254 | + | |
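To make the format specifiers handled above concrete: for a hypothetical process named "myapp" with pid 1234 and uid 1000, dumped on SIGSEGV (signal 11), a few example core_pattern values would expand as follows (patterns and paths are illustrative, not taken from this commit):

/*
 * core_pattern                 resulting core name
 * "core"                       "core"  ("core.1234" if core_uses_pid is set)
 * "core.%p.%s"                 "core.1234.11"
 * "/var/crash/%e-%u.core"      "/var/crash/myapp-1000.core"
 * "|/usr/libexec/dumper %p"    piped: runs /usr/libexec/dumper with argument "1234"
 */

A leading '|' makes format_corename() return 1 (ispipe), which steers do_coredump() below onto the usermode-helper path instead of opening a file.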
255 | +static int zap_process(struct task_struct *start, int exit_code) | |
256 | +{ | |
257 | + struct task_struct *t; | |
258 | + int nr = 0; | |
259 | + | |
260 | + start->signal->flags = SIGNAL_GROUP_EXIT; | |
261 | + start->signal->group_exit_code = exit_code; | |
262 | + start->signal->group_stop_count = 0; | |
263 | + | |
264 | + t = start; | |
265 | + do { | |
266 | + task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); | |
267 | + if (t != current && t->mm) { | |
268 | + sigaddset(&t->pending.signal, SIGKILL); | |
269 | + signal_wake_up(t, 1); | |
270 | + nr++; | |
271 | + } | |
272 | + } while_each_thread(start, t); | |
273 | + | |
274 | + return nr; | |
275 | +} | |
276 | + | |
277 | +static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm, | |
278 | + struct core_state *core_state, int exit_code) | |
279 | +{ | |
280 | + struct task_struct *g, *p; | |
281 | + unsigned long flags; | |
282 | + int nr = -EAGAIN; | |
283 | + | |
284 | + spin_lock_irq(&tsk->sighand->siglock); | |
285 | + if (!signal_group_exit(tsk->signal)) { | |
286 | + mm->core_state = core_state; | |
287 | + nr = zap_process(tsk, exit_code); | |
288 | + } | |
289 | + spin_unlock_irq(&tsk->sighand->siglock); | |
290 | + if (unlikely(nr < 0)) | |
291 | + return nr; | |
292 | + | |
293 | + if (atomic_read(&mm->mm_users) == nr + 1) | |
294 | + goto done; | |
295 | + /* | |
296 | + * We should find and kill all tasks which use this mm, and we should | |
297 | + * count them correctly into ->nr_threads. We don't take tasklist | |
298 | + * lock, but this is safe wrt: | |
299 | + * | |
300 | + * fork: | |
301 | + * None of sub-threads can fork after zap_process(leader). All | |
302 | + * processes which were created before this point should be | |
303 | + * visible to zap_threads() because copy_process() adds the new | |
304 | + * process to the tail of init_task.tasks list, and lock/unlock | |
305 | + * of ->siglock provides a memory barrier. | |
306 | + * | |
307 | + * do_exit: | |
308 | + * The caller holds mm->mmap_sem. This means that the task which | |
309 | + * uses this mm can't pass exit_mm(), so it can't exit or clear | |
310 | + * its ->mm. | |
311 | + * | |
312 | + * de_thread: | |
313 | + * It does list_replace_rcu(&leader->tasks, &current->tasks), | |
314 | + * we must see either old or new leader, this does not matter. | |
315 | + * However, it can change p->sighand, so lock_task_sighand(p) | |
316 | + * must be used. Since p->mm != NULL and we hold ->mmap_sem | |
317 | + * it can't fail. | |
318 | + * | |
319 | + * Note also that "g" can be the old leader with ->mm == NULL | |
320 | + * and already unhashed and thus removed from ->thread_group. | |
321 | + * This is OK, __unhash_process()->list_del_rcu() does not | |
322 | + * clear the ->next pointer, we will find the new leader via | |
323 | + * next_thread(). | |
324 | + */ | |
325 | + rcu_read_lock(); | |
326 | + for_each_process(g) { | |
327 | + if (g == tsk->group_leader) | |
328 | + continue; | |
329 | + if (g->flags & PF_KTHREAD) | |
330 | + continue; | |
331 | + p = g; | |
332 | + do { | |
333 | + if (p->mm) { | |
334 | + if (unlikely(p->mm == mm)) { | |
335 | + lock_task_sighand(p, &flags); | |
336 | + nr += zap_process(p, exit_code); | |
337 | + unlock_task_sighand(p, &flags); | |
338 | + } | |
339 | + break; | |
340 | + } | |
341 | + } while_each_thread(g, p); | |
342 | + } | |
343 | + rcu_read_unlock(); | |
344 | +done: | |
345 | + atomic_set(&core_state->nr_threads, nr); | |
346 | + return nr; | |
347 | +} | |
348 | + | |
349 | +static int coredump_wait(int exit_code, struct core_state *core_state) | |
350 | +{ | |
351 | + struct task_struct *tsk = current; | |
352 | + struct mm_struct *mm = tsk->mm; | |
353 | + int core_waiters = -EBUSY; | |
354 | + | |
355 | + init_completion(&core_state->startup); | |
356 | + core_state->dumper.task = tsk; | |
357 | + core_state->dumper.next = NULL; | |
358 | + | |
359 | + down_write(&mm->mmap_sem); | |
360 | + if (!mm->core_state) | |
361 | + core_waiters = zap_threads(tsk, mm, core_state, exit_code); | |
362 | + up_write(&mm->mmap_sem); | |
363 | + | |
364 | + if (core_waiters > 0) { | |
365 | + struct core_thread *ptr; | |
366 | + | |
367 | + wait_for_completion(&core_state->startup); | |
368 | + /* | |
369 | + * Wait for all the threads to become inactive, so that | |
370 | + * all the thread context (extended register state, like | |
371 | + * fpu etc) gets copied to the memory. | |
372 | + */ | |
373 | + ptr = core_state->dumper.next; | |
374 | + while (ptr != NULL) { | |
375 | + wait_task_inactive(ptr->task, 0); | |
376 | + ptr = ptr->next; | |
377 | + } | |
378 | + } | |
379 | + | |
380 | + return core_waiters; | |
381 | +} | |
382 | + | |
383 | +static void coredump_finish(struct mm_struct *mm) | |
384 | +{ | |
385 | + struct core_thread *curr, *next; | |
386 | + struct task_struct *task; | |
387 | + | |
388 | + next = mm->core_state->dumper.next; | |
389 | + while ((curr = next) != NULL) { | |
390 | + next = curr->next; | |
391 | + task = curr->task; | |
392 | + /* | |
393 | + * see exit_mm(), curr->task must not see | |
394 | + * ->task == NULL before we read ->next. | |
395 | + */ | |
396 | + smp_mb(); | |
397 | + curr->task = NULL; | |
398 | + wake_up_process(task); | |
399 | + } | |
400 | + | |
401 | + mm->core_state = NULL; | |
402 | +} | |
403 | + | |
404 | +static void wait_for_dump_helpers(struct file *file) | |
405 | +{ | |
406 | + struct pipe_inode_info *pipe; | |
407 | + | |
408 | + pipe = file->f_path.dentry->d_inode->i_pipe; | |
409 | + | |
410 | + pipe_lock(pipe); | |
411 | + pipe->readers++; | |
412 | + pipe->writers--; | |
413 | + | |
414 | + while ((pipe->readers > 1) && (!signal_pending(current))) { | |
415 | + wake_up_interruptible_sync(&pipe->wait); | |
416 | + kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); | |
417 | + pipe_wait(pipe); | |
418 | + } | |
419 | + | |
420 | + pipe->readers--; | |
421 | + pipe->writers++; | |
422 | + pipe_unlock(pipe); | |
423 | + | |
424 | +} | |
425 | + | |
426 | +/* | |
427 | + * umh_pipe_setup | |
428 | + * helper function to customize the process used | |
429 | + * to collect the core in userspace. Specifically | |
430 | + * it sets up a pipe and installs it as fd 0 (stdin) | |
431 | + * for the process. Returns 0 on success, or | |
432 | + * PTR_ERR on failure. | |
433 | + * Note that it also sets the core limit to 1. This | |
434 | + * is a special value that we use to trap recursive | |
435 | + * core dumps | |
436 | + */ | |
437 | +static int umh_pipe_setup(struct subprocess_info *info, struct cred *new) | |
438 | +{ | |
439 | + struct file *files[2]; | |
440 | + struct coredump_params *cp = (struct coredump_params *)info->data; | |
441 | + int err = create_pipe_files(files, 0); | |
442 | + if (err) | |
443 | + return err; | |
444 | + | |
445 | + cp->file = files[1]; | |
446 | + | |
447 | + replace_fd(0, files[0], 0); | |
448 | + /* and disallow core files too */ | |
449 | + current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1}; | |
450 | + | |
451 | + return 0; | |
452 | +} | |
453 | + | |
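For context, a pipe helper registered through core_pattern (for example "|/usr/local/sbin/core-catcher %p", a hypothetical path) receives the core dump on fd 0, which is exactly what umh_pipe_setup() above arranges. A minimal, illustrative sketch of such a helper:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical core_pattern pipe helper: the kernel writes the core dump
 * into our stdin (fd 0); argv[1] is the %p pid from the pattern.
 * The /var/crash output path is an assumption for this example. */
int main(int argc, char **argv)
{
	char path[64];
	char buf[4096];
	ssize_t n;
	int fd;

	if (argc < 2)
		return 1;

	snprintf(path, sizeof(path), "/var/crash/core.%s", argv[1]);
	fd = open(path, O_WRONLY | O_CREAT | O_EXCL, 0600);
	if (fd < 0)
		return 1;

	/* copy the dump from stdin to the target file */
	while ((n = read(0, buf, sizeof(buf))) > 0) {
		if (write(fd, buf, n) != n) {
			close(fd);
			return 1;
		}
	}
	close(fd);
	return 0;
}

Because umh_pipe_setup() also resets RLIMIT_CORE to 1, a crash of the helper itself is caught by the cprm.limit == 1 check in do_coredump() below rather than recursing into another piped dump.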
454 | +void do_coredump(long signr, int exit_code, struct pt_regs *regs) | |
455 | +{ | |
456 | + struct core_state core_state; | |
457 | + struct core_name cn; | |
458 | + struct mm_struct *mm = current->mm; | |
459 | + struct linux_binfmt * binfmt; | |
460 | + const struct cred *old_cred; | |
461 | + struct cred *cred; | |
462 | + int retval = 0; | |
463 | + int flag = 0; | |
464 | + int ispipe; | |
465 | + struct files_struct *displaced; | |
466 | + bool need_nonrelative = false; | |
467 | + static atomic_t core_dump_count = ATOMIC_INIT(0); | |
468 | + struct coredump_params cprm = { | |
469 | + .signr = signr, | |
470 | + .regs = regs, | |
471 | + .limit = rlimit(RLIMIT_CORE), | |
472 | + /* | |
473 | + * We must use the same mm->flags while dumping core to avoid | |
474 | + * inconsistency of bit flags, since this flag is not protected | |
475 | + * by any locks. | |
476 | + */ | |
477 | + .mm_flags = mm->flags, | |
478 | + }; | |
479 | + | |
480 | + audit_core_dumps(signr); | |
481 | + | |
482 | + binfmt = mm->binfmt; | |
483 | + if (!binfmt || !binfmt->core_dump) | |
484 | + goto fail; | |
485 | + if (!__get_dumpable(cprm.mm_flags)) | |
486 | + goto fail; | |
487 | + | |
488 | + cred = prepare_creds(); | |
489 | + if (!cred) | |
490 | + goto fail; | |
491 | + /* | |
492 | + * We cannot trust fsuid as being the "true" uid of the process | |
493 | + * nor do we know its entire history. We only know it was tainted | |
494 | + * so we dump it as root in mode 2, and only into a controlled | |
495 | + * environment (pipe handler or fully qualified path). | |
496 | + */ | |
497 | + if (__get_dumpable(cprm.mm_flags) == SUID_DUMPABLE_SAFE) { | |
498 | + /* Setuid core dump mode */ | |
499 | + flag = O_EXCL; /* Stop rewrite attacks */ | |
500 | + cred->fsuid = GLOBAL_ROOT_UID; /* Dump root private */ | |
501 | + need_nonrelative = true; | |
502 | + } | |
503 | + | |
504 | + retval = coredump_wait(exit_code, &core_state); | |
505 | + if (retval < 0) | |
506 | + goto fail_creds; | |
507 | + | |
508 | + old_cred = override_creds(cred); | |
509 | + | |
510 | + /* | |
511 | + * Clear any false indication of pending signals that might | |
512 | + * be seen by the filesystem code called to write the core file. | |
513 | + */ | |
514 | + clear_thread_flag(TIF_SIGPENDING); | |
515 | + | |
516 | + ispipe = format_corename(&cn, signr); | |
517 | + | |
518 | + if (ispipe) { | |
519 | + int dump_count; | |
520 | + char **helper_argv; | |
521 | + | |
522 | + if (ispipe < 0) { | |
523 | + printk(KERN_WARNING "format_corename failed\n"); | |
524 | + printk(KERN_WARNING "Aborting core\n"); | |
525 | + goto fail_corename; | |
526 | + } | |
527 | + | |
528 | + if (cprm.limit == 1) { | |
529 | + /* See umh_pipe_setup() which sets RLIMIT_CORE = 1. | |
530 | + * | |
531 | + * Normally core limits are irrelevant to pipes, since | |
532 | + * we're not writing to the file system, but we use | |
534 | + * cprm.limit of 1 here as a special value, this is a | |
534 | + * consistent way to catch recursive crashes. | |
535 | + * We can still crash if the core_pattern binary sets | |
536 | + * RLIM_CORE = !1, but it runs as root, and can do | |
537 | + * lots of stupid things. | |
538 | + * | |
539 | + * Note that we use task_tgid_vnr here to grab the pid | |
540 | + * of the process group leader. That way we get the | |
541 | + * right pid if a thread in a multi-threaded | |
542 | + * core_pattern process dies. | |
543 | + */ | |
544 | + printk(KERN_WARNING | |
545 | + "Process %d(%s) has RLIMIT_CORE set to 1\n", | |
546 | + task_tgid_vnr(current), current->comm); | |
547 | + printk(KERN_WARNING "Aborting core\n"); | |
548 | + goto fail_unlock; | |
549 | + } | |
550 | + cprm.limit = RLIM_INFINITY; | |
551 | + | |
552 | + dump_count = atomic_inc_return(&core_dump_count); | |
553 | + if (core_pipe_limit && (core_pipe_limit < dump_count)) { | |
554 | + printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n", | |
555 | + task_tgid_vnr(current), current->comm); | |
556 | + printk(KERN_WARNING "Skipping core dump\n"); | |
557 | + goto fail_dropcount; | |
558 | + } | |
559 | + | |
560 | + helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL); | |
561 | + if (!helper_argv) { | |
562 | + printk(KERN_WARNING "%s failed to allocate memory\n", | |
563 | + __func__); | |
564 | + goto fail_dropcount; | |
565 | + } | |
566 | + | |
567 | + retval = call_usermodehelper_fns(helper_argv[0], helper_argv, | |
568 | + NULL, UMH_WAIT_EXEC, umh_pipe_setup, | |
569 | + NULL, &cprm); | |
570 | + argv_free(helper_argv); | |
571 | + if (retval) { | |
572 | + printk(KERN_INFO "Core dump to %s pipe failed\n", | |
573 | + cn.corename); | |
574 | + goto close_fail; | |
575 | + } | |
576 | + } else { | |
577 | + struct inode *inode; | |
578 | + | |
579 | + if (cprm.limit < binfmt->min_coredump) | |
580 | + goto fail_unlock; | |
581 | + | |
582 | + if (need_nonrelative && cn.corename[0] != '/') { | |
583 | + printk(KERN_WARNING "Pid %d(%s) can only dump core "\ | |
584 | + "to fully qualified path!\n", | |
585 | + task_tgid_vnr(current), current->comm); | |
586 | + printk(KERN_WARNING "Skipping core dump\n"); | |
587 | + goto fail_unlock; | |
588 | + } | |
589 | + | |
590 | + cprm.file = filp_open(cn.corename, | |
591 | + O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, | |
592 | + 0600); | |
593 | + if (IS_ERR(cprm.file)) | |
594 | + goto fail_unlock; | |
595 | + | |
596 | + inode = cprm.file->f_path.dentry->d_inode; | |
597 | + if (inode->i_nlink > 1) | |
598 | + goto close_fail; | |
599 | + if (d_unhashed(cprm.file->f_path.dentry)) | |
600 | + goto close_fail; | |
601 | + /* | |
602 | + * AK: actually i see no reason to not allow this for named | |
603 | + * pipes etc, but keep the previous behaviour for now. | |
604 | + */ | |
605 | + if (!S_ISREG(inode->i_mode)) | |
606 | + goto close_fail; | |
607 | + /* | |
608 | + * Don't allow local users to get cute and trick others to coredump | |
609 | + * into their pre-created files. | |
610 | + */ | |
611 | + if (!uid_eq(inode->i_uid, current_fsuid())) | |
612 | + goto close_fail; | |
613 | + if (!cprm.file->f_op || !cprm.file->f_op->write) | |
614 | + goto close_fail; | |
615 | + if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file)) | |
616 | + goto close_fail; | |
617 | + } | |
618 | + | |
619 | + /* get us an unshared descriptor table; almost always a no-op */ | |
620 | + retval = unshare_files(&displaced); | |
621 | + if (retval) | |
622 | + goto close_fail; | |
623 | + if (displaced) | |
624 | + put_files_struct(displaced); | |
625 | + retval = binfmt->core_dump(&cprm); | |
626 | + if (retval) | |
627 | + current->signal->group_exit_code |= 0x80; | |
628 | + | |
629 | + if (ispipe && core_pipe_limit) | |
630 | + wait_for_dump_helpers(cprm.file); | |
631 | +close_fail: | |
632 | + if (cprm.file) | |
633 | + filp_close(cprm.file, NULL); | |
634 | +fail_dropcount: | |
635 | + if (ispipe) | |
636 | + atomic_dec(&core_dump_count); | |
637 | +fail_unlock: | |
638 | + kfree(cn.corename); | |
639 | +fail_corename: | |
640 | + coredump_finish(mm); | |
641 | + revert_creds(old_cred); | |
642 | +fail_creds: | |
643 | + put_cred(cred); | |
644 | +fail: | |
645 | + return; | |
646 | +} | |
647 | + | |
648 | +/* | |
649 | + * Core dumping helper functions. These are the only things you should | |
650 | + * do on a core-file: use only these functions to write out all the | |
651 | + * necessary info. | |
652 | + */ | |
653 | +int dump_write(struct file *file, const void *addr, int nr) | |
654 | +{ | |
655 | + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr; | |
656 | +} | |
657 | +EXPORT_SYMBOL(dump_write); | |
658 | + | |
659 | +int dump_seek(struct file *file, loff_t off) | |
660 | +{ | |
661 | + int ret = 1; | |
662 | + | |
663 | + if (file->f_op->llseek && file->f_op->llseek != no_llseek) { | |
664 | + if (file->f_op->llseek(file, off, SEEK_CUR) < 0) | |
665 | + return 0; | |
666 | + } else { | |
667 | + char *buf = (char *)get_zeroed_page(GFP_KERNEL); | |
668 | + | |
669 | + if (!buf) | |
670 | + return 0; | |
671 | + while (off > 0) { | |
672 | + unsigned long n = off; | |
673 | + | |
674 | + if (n > PAGE_SIZE) | |
675 | + n = PAGE_SIZE; | |
676 | + if (!dump_write(file, buf, n)) { | |
677 | + ret = 0; | |
678 | + break; | |
679 | + } | |
680 | + off -= n; | |
681 | + } | |
682 | + free_page((unsigned long)buf); | |
683 | + } | |
684 | + return ret; | |
685 | +} | |
686 | +EXPORT_SYMBOL(dump_seek); |
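dump_write() and dump_seek() are exported for the binary-format handlers (binfmt_elf and friends) that implement ->core_dump(). What follows is a rough sketch, not the real ELF writer, of how a handler might use them; the "CORE" header struct and the function itself are invented for illustration:

#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/kernel.h>
#include <linux/mm.h>

/* Hedged sketch of a ->core_dump() method; see fs/binfmt_elf.c for a real one. */
static int example_core_dump(struct coredump_params *cprm)
{
	struct {
		char magic[4];
		unsigned int nsegs;
	} hdr = { { 'C', 'O', 'R', 'E' }, 0 };
	loff_t pad = round_up(sizeof(hdr), PAGE_SIZE) - sizeof(hdr);

	/* dump_write() returns non-zero on success, 0 on failure */
	if (!dump_write(cprm->file, &hdr, sizeof(hdr)))
		return 0;

	/* pad out to a page boundary before the first data segment */
	if (!dump_seek(cprm->file, pad))
		return 0;

	/* ...dump_write() each memory segment here... */

	return 1;	/* non-zero tells do_coredump() a core was written */
}

A non-zero return from ->core_dump() is what makes do_coredump() set the 0x80 bit in the group exit code, the conventional "core dumped" marker reported by WCOREDUMP().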
fs/exec.c
... | ... | @@ -66,19 +66,8 @@ |
66 | 66 | |
67 | 67 | #include <trace/events/sched.h> |
68 | 68 | |
69 | -int core_uses_pid; | |
70 | -char core_pattern[CORENAME_MAX_SIZE] = "core"; | |
71 | -unsigned int core_pipe_limit; | |
72 | 69 | int suid_dumpable = 0; |
73 | 70 | |
74 | -struct core_name { | |
75 | - char *corename; | |
76 | - int used, size; | |
77 | -}; | |
78 | -static atomic_t call_count = ATOMIC_INIT(1); | |
79 | - | |
80 | -/* The maximal length of core_pattern is also specified in sysctl.c */ | |
81 | - | |
82 | 71 | static LIST_HEAD(formats); |
83 | 72 | static DEFINE_RWLOCK(binfmt_lock); |
84 | 73 | |
... | ... | @@ -1603,353 +1592,6 @@ |
1603 | 1592 | |
1604 | 1593 | EXPORT_SYMBOL(set_binfmt); |
1605 | 1594 | |
1606 | -static int expand_corename(struct core_name *cn) | |
1607 | -{ | |
1608 | - char *old_corename = cn->corename; | |
1609 | - | |
1610 | - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count); | |
1611 | - cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL); | |
1612 | - | |
1613 | - if (!cn->corename) { | |
1614 | - kfree(old_corename); | |
1615 | - return -ENOMEM; | |
1616 | - } | |
1617 | - | |
1618 | - return 0; | |
1619 | -} | |
1620 | - | |
1621 | -static int cn_printf(struct core_name *cn, const char *fmt, ...) | |
1622 | -{ | |
1623 | - char *cur; | |
1624 | - int need; | |
1625 | - int ret; | |
1626 | - va_list arg; | |
1627 | - | |
1628 | - va_start(arg, fmt); | |
1629 | - need = vsnprintf(NULL, 0, fmt, arg); | |
1630 | - va_end(arg); | |
1631 | - | |
1632 | - if (likely(need < cn->size - cn->used - 1)) | |
1633 | - goto out_printf; | |
1634 | - | |
1635 | - ret = expand_corename(cn); | |
1636 | - if (ret) | |
1637 | - goto expand_fail; | |
1638 | - | |
1639 | -out_printf: | |
1640 | - cur = cn->corename + cn->used; | |
1641 | - va_start(arg, fmt); | |
1642 | - vsnprintf(cur, need + 1, fmt, arg); | |
1643 | - va_end(arg); | |
1644 | - cn->used += need; | |
1645 | - return 0; | |
1646 | - | |
1647 | -expand_fail: | |
1648 | - return ret; | |
1649 | -} | |
1650 | - | |
1651 | -static void cn_escape(char *str) | |
1652 | -{ | |
1653 | - for (; *str; str++) | |
1654 | - if (*str == '/') | |
1655 | - *str = '!'; | |
1656 | -} | |
1657 | - | |
1658 | -static int cn_print_exe_file(struct core_name *cn) | |
1659 | -{ | |
1660 | - struct file *exe_file; | |
1661 | - char *pathbuf, *path; | |
1662 | - int ret; | |
1663 | - | |
1664 | - exe_file = get_mm_exe_file(current->mm); | |
1665 | - if (!exe_file) { | |
1666 | - char *commstart = cn->corename + cn->used; | |
1667 | - ret = cn_printf(cn, "%s (path unknown)", current->comm); | |
1668 | - cn_escape(commstart); | |
1669 | - return ret; | |
1670 | - } | |
1671 | - | |
1672 | - pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY); | |
1673 | - if (!pathbuf) { | |
1674 | - ret = -ENOMEM; | |
1675 | - goto put_exe_file; | |
1676 | - } | |
1677 | - | |
1678 | - path = d_path(&exe_file->f_path, pathbuf, PATH_MAX); | |
1679 | - if (IS_ERR(path)) { | |
1680 | - ret = PTR_ERR(path); | |
1681 | - goto free_buf; | |
1682 | - } | |
1683 | - | |
1684 | - cn_escape(path); | |
1685 | - | |
1686 | - ret = cn_printf(cn, "%s", path); | |
1687 | - | |
1688 | -free_buf: | |
1689 | - kfree(pathbuf); | |
1690 | -put_exe_file: | |
1691 | - fput(exe_file); | |
1692 | - return ret; | |
1693 | -} | |
1694 | - | |
1695 | -/* format_corename will inspect the pattern parameter, and output a | |
1696 | - * name into corename, which must have space for at least | |
1697 | - * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator. | |
1698 | - */ | |
1699 | -static int format_corename(struct core_name *cn, long signr) | |
1700 | -{ | |
1701 | - const struct cred *cred = current_cred(); | |
1702 | - const char *pat_ptr = core_pattern; | |
1703 | - int ispipe = (*pat_ptr == '|'); | |
1704 | - int pid_in_pattern = 0; | |
1705 | - int err = 0; | |
1706 | - | |
1707 | - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count); | |
1708 | - cn->corename = kmalloc(cn->size, GFP_KERNEL); | |
1709 | - cn->used = 0; | |
1710 | - | |
1711 | - if (!cn->corename) | |
1712 | - return -ENOMEM; | |
1713 | - | |
1714 | - /* Repeat as long as we have more pattern to process and more output | |
1715 | - space */ | |
1716 | - while (*pat_ptr) { | |
1717 | - if (*pat_ptr != '%') { | |
1718 | - if (*pat_ptr == 0) | |
1719 | - goto out; | |
1720 | - err = cn_printf(cn, "%c", *pat_ptr++); | |
1721 | - } else { | |
1722 | - switch (*++pat_ptr) { | |
1723 | - /* single % at the end, drop that */ | |
1724 | - case 0: | |
1725 | - goto out; | |
1726 | - /* Double percent, output one percent */ | |
1727 | - case '%': | |
1728 | - err = cn_printf(cn, "%c", '%'); | |
1729 | - break; | |
1730 | - /* pid */ | |
1731 | - case 'p': | |
1732 | - pid_in_pattern = 1; | |
1733 | - err = cn_printf(cn, "%d", | |
1734 | - task_tgid_vnr(current)); | |
1735 | - break; | |
1736 | - /* uid */ | |
1737 | - case 'u': | |
1738 | - err = cn_printf(cn, "%d", cred->uid); | |
1739 | - break; | |
1740 | - /* gid */ | |
1741 | - case 'g': | |
1742 | - err = cn_printf(cn, "%d", cred->gid); | |
1743 | - break; | |
1744 | - /* signal that caused the coredump */ | |
1745 | - case 's': | |
1746 | - err = cn_printf(cn, "%ld", signr); | |
1747 | - break; | |
1748 | - /* UNIX time of coredump */ | |
1749 | - case 't': { | |
1750 | - struct timeval tv; | |
1751 | - do_gettimeofday(&tv); | |
1752 | - err = cn_printf(cn, "%lu", tv.tv_sec); | |
1753 | - break; | |
1754 | - } | |
1755 | - /* hostname */ | |
1756 | - case 'h': { | |
1757 | - char *namestart = cn->corename + cn->used; | |
1758 | - down_read(&uts_sem); | |
1759 | - err = cn_printf(cn, "%s", | |
1760 | - utsname()->nodename); | |
1761 | - up_read(&uts_sem); | |
1762 | - cn_escape(namestart); | |
1763 | - break; | |
1764 | - } | |
1765 | - /* executable */ | |
1766 | - case 'e': { | |
1767 | - char *commstart = cn->corename + cn->used; | |
1768 | - err = cn_printf(cn, "%s", current->comm); | |
1769 | - cn_escape(commstart); | |
1770 | - break; | |
1771 | - } | |
1772 | - case 'E': | |
1773 | - err = cn_print_exe_file(cn); | |
1774 | - break; | |
1775 | - /* core limit size */ | |
1776 | - case 'c': | |
1777 | - err = cn_printf(cn, "%lu", | |
1778 | - rlimit(RLIMIT_CORE)); | |
1779 | - break; | |
1780 | - default: | |
1781 | - break; | |
1782 | - } | |
1783 | - ++pat_ptr; | |
1784 | - } | |
1785 | - | |
1786 | - if (err) | |
1787 | - return err; | |
1788 | - } | |
1789 | - | |
1790 | - /* Backward compatibility with core_uses_pid: | |
1791 | - * | |
1792 | - * If core_pattern does not include a %p (as is the default) | |
1793 | - * and core_uses_pid is set, then .%pid will be appended to | |
1794 | - * the filename. Do not do this for piped commands. */ | |
1795 | - if (!ispipe && !pid_in_pattern && core_uses_pid) { | |
1796 | - err = cn_printf(cn, ".%d", task_tgid_vnr(current)); | |
1797 | - if (err) | |
1798 | - return err; | |
1799 | - } | |
1800 | -out: | |
1801 | - return ispipe; | |
1802 | -} | |
1803 | - | |
1804 | -static int zap_process(struct task_struct *start, int exit_code) | |
1805 | -{ | |
1806 | - struct task_struct *t; | |
1807 | - int nr = 0; | |
1808 | - | |
1809 | - start->signal->flags = SIGNAL_GROUP_EXIT; | |
1810 | - start->signal->group_exit_code = exit_code; | |
1811 | - start->signal->group_stop_count = 0; | |
1812 | - | |
1813 | - t = start; | |
1814 | - do { | |
1815 | - task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); | |
1816 | - if (t != current && t->mm) { | |
1817 | - sigaddset(&t->pending.signal, SIGKILL); | |
1818 | - signal_wake_up(t, 1); | |
1819 | - nr++; | |
1820 | - } | |
1821 | - } while_each_thread(start, t); | |
1822 | - | |
1823 | - return nr; | |
1824 | -} | |
1825 | - | |
1826 | -static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm, | |
1827 | - struct core_state *core_state, int exit_code) | |
1828 | -{ | |
1829 | - struct task_struct *g, *p; | |
1830 | - unsigned long flags; | |
1831 | - int nr = -EAGAIN; | |
1832 | - | |
1833 | - spin_lock_irq(&tsk->sighand->siglock); | |
1834 | - if (!signal_group_exit(tsk->signal)) { | |
1835 | - mm->core_state = core_state; | |
1836 | - nr = zap_process(tsk, exit_code); | |
1837 | - } | |
1838 | - spin_unlock_irq(&tsk->sighand->siglock); | |
1839 | - if (unlikely(nr < 0)) | |
1840 | - return nr; | |
1841 | - | |
1842 | - if (atomic_read(&mm->mm_users) == nr + 1) | |
1843 | - goto done; | |
1844 | - /* | |
1845 | - * We should find and kill all tasks which use this mm, and we should | |
1846 | - * count them correctly into ->nr_threads. We don't take tasklist | |
1847 | - * lock, but this is safe wrt: | |
1848 | - * | |
1849 | - * fork: | |
1850 | - * None of sub-threads can fork after zap_process(leader). All | |
1851 | - * processes which were created before this point should be | |
1852 | - * visible to zap_threads() because copy_process() adds the new | |
1853 | - * process to the tail of init_task.tasks list, and lock/unlock | |
1854 | - * of ->siglock provides a memory barrier. | |
1855 | - * | |
1856 | - * do_exit: | |
1857 | - * The caller holds mm->mmap_sem. This means that the task which | |
1858 | - * uses this mm can't pass exit_mm(), so it can't exit or clear | |
1859 | - * its ->mm. | |
1860 | - * | |
1861 | - * de_thread: | |
1862 | - * It does list_replace_rcu(&leader->tasks, &current->tasks), | |
1863 | - * we must see either old or new leader, this does not matter. | |
1864 | - * However, it can change p->sighand, so lock_task_sighand(p) | |
1865 | - * must be used. Since p->mm != NULL and we hold ->mmap_sem | |
1866 | - * it can't fail. | |
1867 | - * | |
1868 | - * Note also that "g" can be the old leader with ->mm == NULL | |
1869 | - * and already unhashed and thus removed from ->thread_group. | |
1870 | - * This is OK, __unhash_process()->list_del_rcu() does not | |
1871 | - * clear the ->next pointer, we will find the new leader via | |
1872 | - * next_thread(). | |
1873 | - */ | |
1874 | - rcu_read_lock(); | |
1875 | - for_each_process(g) { | |
1876 | - if (g == tsk->group_leader) | |
1877 | - continue; | |
1878 | - if (g->flags & PF_KTHREAD) | |
1879 | - continue; | |
1880 | - p = g; | |
1881 | - do { | |
1882 | - if (p->mm) { | |
1883 | - if (unlikely(p->mm == mm)) { | |
1884 | - lock_task_sighand(p, &flags); | |
1885 | - nr += zap_process(p, exit_code); | |
1886 | - unlock_task_sighand(p, &flags); | |
1887 | - } | |
1888 | - break; | |
1889 | - } | |
1890 | - } while_each_thread(g, p); | |
1891 | - } | |
1892 | - rcu_read_unlock(); | |
1893 | -done: | |
1894 | - atomic_set(&core_state->nr_threads, nr); | |
1895 | - return nr; | |
1896 | -} | |
1897 | - | |
1898 | -static int coredump_wait(int exit_code, struct core_state *core_state) | |
1899 | -{ | |
1900 | - struct task_struct *tsk = current; | |
1901 | - struct mm_struct *mm = tsk->mm; | |
1902 | - int core_waiters = -EBUSY; | |
1903 | - | |
1904 | - init_completion(&core_state->startup); | |
1905 | - core_state->dumper.task = tsk; | |
1906 | - core_state->dumper.next = NULL; | |
1907 | - | |
1908 | - down_write(&mm->mmap_sem); | |
1909 | - if (!mm->core_state) | |
1910 | - core_waiters = zap_threads(tsk, mm, core_state, exit_code); | |
1911 | - up_write(&mm->mmap_sem); | |
1912 | - | |
1913 | - if (core_waiters > 0) { | |
1914 | - struct core_thread *ptr; | |
1915 | - | |
1916 | - wait_for_completion(&core_state->startup); | |
1917 | - /* | |
1918 | - * Wait for all the threads to become inactive, so that | |
1919 | - * all the thread context (extended register state, like | |
1920 | - * fpu etc) gets copied to the memory. | |
1921 | - */ | |
1922 | - ptr = core_state->dumper.next; | |
1923 | - while (ptr != NULL) { | |
1924 | - wait_task_inactive(ptr->task, 0); | |
1925 | - ptr = ptr->next; | |
1926 | - } | |
1927 | - } | |
1928 | - | |
1929 | - return core_waiters; | |
1930 | -} | |
1931 | - | |
1932 | -static void coredump_finish(struct mm_struct *mm) | |
1933 | -{ | |
1934 | - struct core_thread *curr, *next; | |
1935 | - struct task_struct *task; | |
1936 | - | |
1937 | - next = mm->core_state->dumper.next; | |
1938 | - while ((curr = next) != NULL) { | |
1939 | - next = curr->next; | |
1940 | - task = curr->task; | |
1941 | - /* | |
1942 | - * see exit_mm(), curr->task must not see | |
1943 | - * ->task == NULL before we read ->next. | |
1944 | - */ | |
1945 | - smp_mb(); | |
1946 | - curr->task = NULL; | |
1947 | - wake_up_process(task); | |
1948 | - } | |
1949 | - | |
1950 | - mm->core_state = NULL; | |
1951 | -} | |
1952 | - | |
1953 | 1595 | /* |
1954 | 1596 | * set_dumpable converts traditional three-value dumpable to two flags and |
1955 | 1597 | * stores them into mm->flags. It modifies lower two bits of mm->flags, but |
... | ... | @@ -1991,7 +1633,7 @@ |
1991 | 1633 | } |
1992 | 1634 | } |
1993 | 1635 | |
1994 | -static int __get_dumpable(unsigned long mm_flags) | |
1636 | +int __get_dumpable(unsigned long mm_flags) | |
1995 | 1637 | { |
1996 | 1638 | int ret; |
1997 | 1639 | |
... | ... | @@ -2003,289 +1645,4 @@ |
2003 | 1645 | { |
2004 | 1646 | return __get_dumpable(mm->flags); |
2005 | 1647 | } |
2006 | - | |
2007 | -static void wait_for_dump_helpers(struct file *file) | |
2008 | -{ | |
2009 | - struct pipe_inode_info *pipe; | |
2010 | - | |
2011 | - pipe = file->f_path.dentry->d_inode->i_pipe; | |
2012 | - | |
2013 | - pipe_lock(pipe); | |
2014 | - pipe->readers++; | |
2015 | - pipe->writers--; | |
2016 | - | |
2017 | - while ((pipe->readers > 1) && (!signal_pending(current))) { | |
2018 | - wake_up_interruptible_sync(&pipe->wait); | |
2019 | - kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); | |
2020 | - pipe_wait(pipe); | |
2021 | - } | |
2022 | - | |
2023 | - pipe->readers--; | |
2024 | - pipe->writers++; | |
2025 | - pipe_unlock(pipe); | |
2026 | - | |
2027 | -} | |
2028 | - | |
2029 | - | |
2030 | -/* | |
2031 | - * umh_pipe_setup | |
2032 | - * helper function to customize the process used | |
2033 | - * to collect the core in userspace. Specifically | |
2034 | - * it sets up a pipe and installs it as fd 0 (stdin) | |
2035 | - * for the process. Returns 0 on success, or | |
2036 | - * PTR_ERR on failure. | |
2037 | - * Note that it also sets the core limit to 1. This | |
2038 | - * is a special value that we use to trap recursive | |
2039 | - * core dumps | |
2040 | - */ | |
2041 | -static int umh_pipe_setup(struct subprocess_info *info, struct cred *new) | |
2042 | -{ | |
2043 | - struct file *files[2]; | |
2044 | - struct coredump_params *cp = (struct coredump_params *)info->data; | |
2045 | - int err = create_pipe_files(files, 0); | |
2046 | - if (err) | |
2047 | - return err; | |
2048 | - | |
2049 | - cp->file = files[1]; | |
2050 | - | |
2051 | - replace_fd(0, files[0], 0); | |
2052 | - /* and disallow core files too */ | |
2053 | - current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1}; | |
2054 | - | |
2055 | - return 0; | |
2056 | -} | |
2057 | - | |
2058 | -void do_coredump(long signr, int exit_code, struct pt_regs *regs) | |
2059 | -{ | |
2060 | - struct core_state core_state; | |
2061 | - struct core_name cn; | |
2062 | - struct mm_struct *mm = current->mm; | |
2063 | - struct linux_binfmt * binfmt; | |
2064 | - const struct cred *old_cred; | |
2065 | - struct cred *cred; | |
2066 | - int retval = 0; | |
2067 | - int flag = 0; | |
2068 | - int ispipe; | |
2069 | - struct files_struct *displaced; | |
2070 | - bool need_nonrelative = false; | |
2071 | - static atomic_t core_dump_count = ATOMIC_INIT(0); | |
2072 | - struct coredump_params cprm = { | |
2073 | - .signr = signr, | |
2074 | - .regs = regs, | |
2075 | - .limit = rlimit(RLIMIT_CORE), | |
2076 | - /* | |
2077 | - * We must use the same mm->flags while dumping core to avoid | |
2078 | - * inconsistency of bit flags, since this flag is not protected | |
2079 | - * by any locks. | |
2080 | - */ | |
2081 | - .mm_flags = mm->flags, | |
2082 | - }; | |
2083 | - | |
2084 | - audit_core_dumps(signr); | |
2085 | - | |
2086 | - binfmt = mm->binfmt; | |
2087 | - if (!binfmt || !binfmt->core_dump) | |
2088 | - goto fail; | |
2089 | - if (!__get_dumpable(cprm.mm_flags)) | |
2090 | - goto fail; | |
2091 | - | |
2092 | - cred = prepare_creds(); | |
2093 | - if (!cred) | |
2094 | - goto fail; | |
2095 | - /* | |
2096 | - * We cannot trust fsuid as being the "true" uid of the process | |
2097 | - * nor do we know its entire history. We only know it was tainted | |
2098 | - * so we dump it as root in mode 2, and only into a controlled | |
2099 | - * environment (pipe handler or fully qualified path). | |
2100 | - */ | |
2101 | - if (__get_dumpable(cprm.mm_flags) == SUID_DUMPABLE_SAFE) { | |
2102 | - /* Setuid core dump mode */ | |
2103 | - flag = O_EXCL; /* Stop rewrite attacks */ | |
2104 | - cred->fsuid = GLOBAL_ROOT_UID; /* Dump root private */ | |
2105 | - need_nonrelative = true; | |
2106 | - } | |
2107 | - | |
2108 | - retval = coredump_wait(exit_code, &core_state); | |
2109 | - if (retval < 0) | |
2110 | - goto fail_creds; | |
2111 | - | |
2112 | - old_cred = override_creds(cred); | |
2113 | - | |
2114 | - /* | |
2115 | - * Clear any false indication of pending signals that might | |
2116 | - * be seen by the filesystem code called to write the core file. | |
2117 | - */ | |
2118 | - clear_thread_flag(TIF_SIGPENDING); | |
2119 | - | |
2120 | - ispipe = format_corename(&cn, signr); | |
2121 | - | |
2122 | - if (ispipe) { | |
2123 | - int dump_count; | |
2124 | - char **helper_argv; | |
2125 | - | |
2126 | - if (ispipe < 0) { | |
2127 | - printk(KERN_WARNING "format_corename failed\n"); | |
2128 | - printk(KERN_WARNING "Aborting core\n"); | |
2129 | - goto fail_corename; | |
2130 | - } | |
2131 | - | |
2132 | - if (cprm.limit == 1) { | |
2133 | - /* See umh_pipe_setup() which sets RLIMIT_CORE = 1. | |
2134 | - * | |
2135 | - * Normally core limits are irrelevant to pipes, since | |
2136 | - * we're not writing to the file system, but we use | |
2137 | - * cprm.limit of 1 here as a special value, this is a | |
2138 | - * consistent way to catch recursive crashes. | |
2139 | - * We can still crash if the core_pattern binary sets | |
2140 | - * RLIM_CORE = !1, but it runs as root, and can do | |
2141 | - * lots of stupid things. | |
2142 | - * | |
2143 | - * Note that we use task_tgid_vnr here to grab the pid | |
2144 | - * of the process group leader. That way we get the | |
2145 | - * right pid if a thread in a multi-threaded | |
2146 | - * core_pattern process dies. | |
2147 | - */ | |
2148 | - printk(KERN_WARNING | |
2149 | - "Process %d(%s) has RLIMIT_CORE set to 1\n", | |
2150 | - task_tgid_vnr(current), current->comm); | |
2151 | - printk(KERN_WARNING "Aborting core\n"); | |
2152 | - goto fail_unlock; | |
2153 | - } | |
2154 | - cprm.limit = RLIM_INFINITY; | |
2155 | - | |
2156 | - dump_count = atomic_inc_return(&core_dump_count); | |
2157 | - if (core_pipe_limit && (core_pipe_limit < dump_count)) { | |
2158 | - printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n", | |
2159 | - task_tgid_vnr(current), current->comm); | |
2160 | - printk(KERN_WARNING "Skipping core dump\n"); | |
2161 | - goto fail_dropcount; | |
2162 | - } | |
2163 | - | |
2164 | - helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL); | |
2165 | - if (!helper_argv) { | |
2166 | - printk(KERN_WARNING "%s failed to allocate memory\n", | |
2167 | - __func__); | |
2168 | - goto fail_dropcount; | |
2169 | - } | |
2170 | - | |
2171 | - retval = call_usermodehelper_fns(helper_argv[0], helper_argv, | |
2172 | - NULL, UMH_WAIT_EXEC, umh_pipe_setup, | |
2173 | - NULL, &cprm); | |
2174 | - argv_free(helper_argv); | |
2175 | - if (retval) { | |
2176 | - printk(KERN_INFO "Core dump to %s pipe failed\n", | |
2177 | - cn.corename); | |
2178 | - goto close_fail; | |
2179 | - } | |
2180 | - } else { | |
2181 | - struct inode *inode; | |
2182 | - | |
2183 | - if (cprm.limit < binfmt->min_coredump) | |
2184 | - goto fail_unlock; | |
2185 | - | |
2186 | - if (need_nonrelative && cn.corename[0] != '/') { | |
2187 | - printk(KERN_WARNING "Pid %d(%s) can only dump core "\ | |
2188 | - "to fully qualified path!\n", | |
2189 | - task_tgid_vnr(current), current->comm); | |
2190 | - printk(KERN_WARNING "Skipping core dump\n"); | |
2191 | - goto fail_unlock; | |
2192 | - } | |
2193 | - | |
2194 | - cprm.file = filp_open(cn.corename, | |
2195 | - O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, | |
2196 | - 0600); | |
2197 | - if (IS_ERR(cprm.file)) | |
2198 | - goto fail_unlock; | |
2199 | - | |
2200 | - inode = cprm.file->f_path.dentry->d_inode; | |
2201 | - if (inode->i_nlink > 1) | |
2202 | - goto close_fail; | |
2203 | - if (d_unhashed(cprm.file->f_path.dentry)) | |
2204 | - goto close_fail; | |
2205 | - /* | |
2206 | - * AK: actually i see no reason to not allow this for named | |
2207 | - * pipes etc, but keep the previous behaviour for now. | |
2208 | - */ | |
2209 | - if (!S_ISREG(inode->i_mode)) | |
2210 | - goto close_fail; | |
2211 | - /* | |
2212 | - * Don't allow local users to get cute and trick others to coredump | |
2213 | - * into their pre-created files. | |
2214 | - */ | |
2215 | - if (!uid_eq(inode->i_uid, current_fsuid())) | |
2216 | - goto close_fail; | |
2217 | - if (!cprm.file->f_op || !cprm.file->f_op->write) | |
2218 | - goto close_fail; | |
2219 | - if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file)) | |
2220 | - goto close_fail; | |
2221 | - } | |
2222 | - | |
2223 | - /* get us an unshared descriptor table; almost always a no-op */ | |
2224 | - retval = unshare_files(&displaced); | |
2225 | - if (retval) | |
2226 | - goto close_fail; | |
2227 | - if (displaced) | |
2228 | - put_files_struct(displaced); | |
2229 | - retval = binfmt->core_dump(&cprm); | |
2230 | - if (retval) | |
2231 | - current->signal->group_exit_code |= 0x80; | |
2232 | - | |
2233 | - if (ispipe && core_pipe_limit) | |
2234 | - wait_for_dump_helpers(cprm.file); | |
2235 | -close_fail: | |
2236 | - if (cprm.file) | |
2237 | - filp_close(cprm.file, NULL); | |
2238 | -fail_dropcount: | |
2239 | - if (ispipe) | |
2240 | - atomic_dec(&core_dump_count); | |
2241 | -fail_unlock: | |
2242 | - kfree(cn.corename); | |
2243 | -fail_corename: | |
2244 | - coredump_finish(mm); | |
2245 | - revert_creds(old_cred); | |
2246 | -fail_creds: | |
2247 | - put_cred(cred); | |
2248 | -fail: | |
2249 | - return; | |
2250 | -} | |
2251 | - | |
2252 | -/* | |
2253 | - * Core dumping helper functions. These are the only things you should | |
2254 | - * do on a core-file: use only these functions to write out all the | |
2255 | - * necessary info. | |
2256 | - */ | |
2257 | -int dump_write(struct file *file, const void *addr, int nr) | |
2258 | -{ | |
2259 | - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr; | |
2260 | -} | |
2261 | -EXPORT_SYMBOL(dump_write); | |
2262 | - | |
2263 | -int dump_seek(struct file *file, loff_t off) | |
2264 | -{ | |
2265 | - int ret = 1; | |
2266 | - | |
2267 | - if (file->f_op->llseek && file->f_op->llseek != no_llseek) { | |
2268 | - if (file->f_op->llseek(file, off, SEEK_CUR) < 0) | |
2269 | - return 0; | |
2270 | - } else { | |
2271 | - char *buf = (char *)get_zeroed_page(GFP_KERNEL); | |
2272 | - | |
2273 | - if (!buf) | |
2274 | - return 0; | |
2275 | - while (off > 0) { | |
2276 | - unsigned long n = off; | |
2277 | - | |
2278 | - if (n > PAGE_SIZE) | |
2279 | - n = PAGE_SIZE; | |
2280 | - if (!dump_write(file, buf, n)) { | |
2281 | - ret = 0; | |
2282 | - break; | |
2283 | - } | |
2284 | - off -= n; | |
2285 | - } | |
2286 | - free_page((unsigned long)buf); | |
2287 | - } | |
2288 | - return ret; | |
2289 | -} | |
2290 | -EXPORT_SYMBOL(dump_seek); |
include/linux/sched.h
... | ... | @@ -405,6 +405,7 @@ |
405 | 405 | |
406 | 406 | extern void set_dumpable(struct mm_struct *mm, int value); |
407 | 407 | extern int get_dumpable(struct mm_struct *mm); |
408 | +extern int __get_dumpable(unsigned long mm_flags); | |
408 | 409 | |
409 | 410 | /* get/set_dumpable() values */ |
410 | 411 | #define SUID_DUMPABLE_DISABLED 0 |