Commit 03221702608c60b470fc86a23bdf4bc30e5bd59f
Committed by
Linus Torvalds
1 parent
2400ff77e7
Exists in
master
and in
39 other branches
[PATCH] fix page leak during core dump
When the dump cannot occur most likely because of a full file system and the page to be written is the zero page, the call to page_cache_release() is missed. Signed-off-by: Brian Pomerantz <bapper@mvista.com> Cc: Hugh Dickins <hugh@veritas.com> Cc: Nick Piggin <nickpiggin@yahoo.com.au> Cc: David Howells <dhowells@redhat.com> Cc: <stable@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 2 changed files with 5 additions and 2 deletions Inline Diff
fs/binfmt_elf.c
1 | /* | 1 | /* |
2 | * linux/fs/binfmt_elf.c | 2 | * linux/fs/binfmt_elf.c |
3 | * | 3 | * |
4 | * These are the functions used to load ELF format executables as used | 4 | * These are the functions used to load ELF format executables as used |
5 | * on SVr4 machines. Information on the format may be found in the book | 5 | * on SVr4 machines. Information on the format may be found in the book |
6 | * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support | 6 | * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support |
7 | * Tools". | 7 | * Tools". |
8 | * | 8 | * |
9 | * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com). | 9 | * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com). |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/fs.h> | 14 | #include <linux/fs.h> |
15 | #include <linux/stat.h> | 15 | #include <linux/stat.h> |
16 | #include <linux/time.h> | 16 | #include <linux/time.h> |
17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
18 | #include <linux/mman.h> | 18 | #include <linux/mman.h> |
19 | #include <linux/a.out.h> | 19 | #include <linux/a.out.h> |
20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
21 | #include <linux/signal.h> | 21 | #include <linux/signal.h> |
22 | #include <linux/binfmts.h> | 22 | #include <linux/binfmts.h> |
23 | #include <linux/string.h> | 23 | #include <linux/string.h> |
24 | #include <linux/file.h> | 24 | #include <linux/file.h> |
25 | #include <linux/fcntl.h> | 25 | #include <linux/fcntl.h> |
26 | #include <linux/ptrace.h> | 26 | #include <linux/ptrace.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/shm.h> | 28 | #include <linux/shm.h> |
29 | #include <linux/personality.h> | 29 | #include <linux/personality.h> |
30 | #include <linux/elfcore.h> | 30 | #include <linux/elfcore.h> |
31 | #include <linux/init.h> | 31 | #include <linux/init.h> |
32 | #include <linux/highuid.h> | 32 | #include <linux/highuid.h> |
33 | #include <linux/smp.h> | 33 | #include <linux/smp.h> |
34 | #include <linux/smp_lock.h> | 34 | #include <linux/smp_lock.h> |
35 | #include <linux/compiler.h> | 35 | #include <linux/compiler.h> |
36 | #include <linux/highmem.h> | 36 | #include <linux/highmem.h> |
37 | #include <linux/pagemap.h> | 37 | #include <linux/pagemap.h> |
38 | #include <linux/security.h> | 38 | #include <linux/security.h> |
39 | #include <linux/syscalls.h> | 39 | #include <linux/syscalls.h> |
40 | #include <linux/random.h> | 40 | #include <linux/random.h> |
41 | #include <linux/elf.h> | 41 | #include <linux/elf.h> |
42 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
43 | #include <asm/param.h> | 43 | #include <asm/param.h> |
44 | #include <asm/page.h> | 44 | #include <asm/page.h> |
45 | 45 | ||
46 | static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs); | 46 | static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs); |
47 | static int load_elf_library(struct file *); | 47 | static int load_elf_library(struct file *); |
48 | static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int); | 48 | static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int); |
49 | 49 | ||
50 | /* | 50 | /* |
51 | * If we don't support core dumping, then supply a NULL so we | 51 | * If we don't support core dumping, then supply a NULL so we |
52 | * don't even try. | 52 | * don't even try. |
53 | */ | 53 | */ |
54 | #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) | 54 | #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) |
55 | static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file); | 55 | static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file); |
56 | #else | 56 | #else |
57 | #define elf_core_dump NULL | 57 | #define elf_core_dump NULL |
58 | #endif | 58 | #endif |
59 | 59 | ||
60 | #if ELF_EXEC_PAGESIZE > PAGE_SIZE | 60 | #if ELF_EXEC_PAGESIZE > PAGE_SIZE |
61 | #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE | 61 | #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE |
62 | #else | 62 | #else |
63 | #define ELF_MIN_ALIGN PAGE_SIZE | 63 | #define ELF_MIN_ALIGN PAGE_SIZE |
64 | #endif | 64 | #endif |
65 | 65 | ||
66 | #ifndef ELF_CORE_EFLAGS | 66 | #ifndef ELF_CORE_EFLAGS |
67 | #define ELF_CORE_EFLAGS 0 | 67 | #define ELF_CORE_EFLAGS 0 |
68 | #endif | 68 | #endif |
69 | 69 | ||
70 | #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1)) | 70 | #define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1)) |
71 | #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1)) | 71 | #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1)) |
72 | #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1)) | 72 | #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1)) |
73 | 73 | ||
74 | static struct linux_binfmt elf_format = { | 74 | static struct linux_binfmt elf_format = { |
75 | .module = THIS_MODULE, | 75 | .module = THIS_MODULE, |
76 | .load_binary = load_elf_binary, | 76 | .load_binary = load_elf_binary, |
77 | .load_shlib = load_elf_library, | 77 | .load_shlib = load_elf_library, |
78 | .core_dump = elf_core_dump, | 78 | .core_dump = elf_core_dump, |
79 | .min_coredump = ELF_EXEC_PAGESIZE, | 79 | .min_coredump = ELF_EXEC_PAGESIZE, |
80 | .hasvdso = 1 | 80 | .hasvdso = 1 |
81 | }; | 81 | }; |
82 | 82 | ||
83 | #define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE) | 83 | #define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE) |
84 | 84 | ||
85 | static int set_brk(unsigned long start, unsigned long end) | 85 | static int set_brk(unsigned long start, unsigned long end) |
86 | { | 86 | { |
87 | start = ELF_PAGEALIGN(start); | 87 | start = ELF_PAGEALIGN(start); |
88 | end = ELF_PAGEALIGN(end); | 88 | end = ELF_PAGEALIGN(end); |
89 | if (end > start) { | 89 | if (end > start) { |
90 | unsigned long addr; | 90 | unsigned long addr; |
91 | down_write(¤t->mm->mmap_sem); | 91 | down_write(¤t->mm->mmap_sem); |
92 | addr = do_brk(start, end - start); | 92 | addr = do_brk(start, end - start); |
93 | up_write(¤t->mm->mmap_sem); | 93 | up_write(¤t->mm->mmap_sem); |
94 | if (BAD_ADDR(addr)) | 94 | if (BAD_ADDR(addr)) |
95 | return addr; | 95 | return addr; |
96 | } | 96 | } |
97 | current->mm->start_brk = current->mm->brk = end; | 97 | current->mm->start_brk = current->mm->brk = end; |
98 | return 0; | 98 | return 0; |
99 | } | 99 | } |
100 | 100 | ||
101 | /* We need to explicitly zero any fractional pages | 101 | /* We need to explicitly zero any fractional pages |
102 | after the data section (i.e. bss). This would | 102 | after the data section (i.e. bss). This would |
103 | contain the junk from the file that should not | 103 | contain the junk from the file that should not |
104 | be in memory | 104 | be in memory |
105 | */ | 105 | */ |
106 | static int padzero(unsigned long elf_bss) | 106 | static int padzero(unsigned long elf_bss) |
107 | { | 107 | { |
108 | unsigned long nbyte; | 108 | unsigned long nbyte; |
109 | 109 | ||
110 | nbyte = ELF_PAGEOFFSET(elf_bss); | 110 | nbyte = ELF_PAGEOFFSET(elf_bss); |
111 | if (nbyte) { | 111 | if (nbyte) { |
112 | nbyte = ELF_MIN_ALIGN - nbyte; | 112 | nbyte = ELF_MIN_ALIGN - nbyte; |
113 | if (clear_user((void __user *) elf_bss, nbyte)) | 113 | if (clear_user((void __user *) elf_bss, nbyte)) |
114 | return -EFAULT; | 114 | return -EFAULT; |
115 | } | 115 | } |
116 | return 0; | 116 | return 0; |
117 | } | 117 | } |
118 | 118 | ||
/* Let's use some macros to make this stack manipulation a little clearer */
120 | #ifdef CONFIG_STACK_GROWSUP | 120 | #ifdef CONFIG_STACK_GROWSUP |
121 | #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items)) | 121 | #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items)) |
122 | #define STACK_ROUND(sp, items) \ | 122 | #define STACK_ROUND(sp, items) \ |
123 | ((15 + (unsigned long) ((sp) + (items))) &~ 15UL) | 123 | ((15 + (unsigned long) ((sp) + (items))) &~ 15UL) |
124 | #define STACK_ALLOC(sp, len) ({ \ | 124 | #define STACK_ALLOC(sp, len) ({ \ |
125 | elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \ | 125 | elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \ |
126 | old_sp; }) | 126 | old_sp; }) |
127 | #else | 127 | #else |
128 | #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items)) | 128 | #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items)) |
129 | #define STACK_ROUND(sp, items) \ | 129 | #define STACK_ROUND(sp, items) \ |
130 | (((unsigned long) (sp - items)) &~ 15UL) | 130 | (((unsigned long) (sp - items)) &~ 15UL) |
131 | #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; }) | 131 | #define STACK_ALLOC(sp, len) ({ sp -= len ; sp; }) |
132 | #endif | 132 | #endif |
133 | 133 | ||
/*
 * Lay out the initial userspace stack for a freshly exec'd ELF image:
 * the optional platform string, the auxiliary vector, argc, and the
 * argv[] / envp[] pointer arrays.  The argument and environment strings
 * themselves were already copied to the stack pages by the exec core;
 * here we only write the pointers to them.
 *
 * @bprm:             binary-parameter block of the exec in progress
 * @exec:             ELF header of the main executable
 * @interp_aout:      nonzero if the interpreter is an a.out binary,
 *                    which expects extra argv/envp slots after argc
 * @load_addr:        load bias of the main executable (used for AT_PHDR)
 * @interp_load_addr: where the interpreter was mapped (used for AT_BASE)
 *
 * Returns 0 on success, -EFAULT if any userspace store faults.
 */
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
		int interp_aout, unsigned long load_addr,
		unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	const char *k_platform = ELF_PLATFORM;
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	struct task_struct *tsk = current;

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		/*
		 * In some cases (e.g. Hyper-Threading), we want to avoid L1
		 * evictions by the processes running on the same package. One
		 * thing we can do is to shuffle the initial stack for them.
		 */
		p = arch_align_stack(p);

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/*
	 * Create the ELF interpreter info.  It is built in the kernel-side
	 * saved_auxv buffer first, then copied to the user stack at the end.
	 * NEW_AUX_ENT appends one (id, value) pair, advancing ei_index by 2.
	 */
	elf_info = (elf_addr_t *)current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
	do { \
		elf_info[ei_index++] = id; \
		elf_info[ei_index++] = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, tsk->uid);
	NEW_AUX_ENT(AT_EUID, tsk->euid);
	NEW_AUX_ENT(AT_GID, tsk->gid);
	NEW_AUX_ENT(AT_EGID, tsk->egid);
	NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

	/* And advance past the AT_NULL entry. */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	/* One slot per argv/envp pointer plus their NULL terminators. */
	items = (argc + 1) + (envc + 1);
	if (interp_aout) {
		items += 3; /* a.out interpreters require argv & envp too */
	} else {
		items += 1; /* ELF interpreters only put argc on the stack */
	}
	/* STACK_ROUND keeps the final stack pointer 16-byte aligned. */
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;
	if (interp_aout) {
		/* a.out interpreters expect explicit argv/envp pointers. */
		argv = sp + 2;
		envp = argv + argc + 1;
		if (__put_user((elf_addr_t)(unsigned long)argv, sp++) ||
		    __put_user((elf_addr_t)(unsigned long)envp, sp++))
			return -EFAULT;
	} else {
		argv = sp;
		envp = argv + argc + 1;
	}

	/*
	 * Populate argv and envp.  p walks the already-copied strings;
	 * strnlen_user gives each string's length including its NUL so p
	 * advances to the next string.
	 */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, argv++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	if (__put_user(0, argv))
		return -EFAULT;
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, envp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	if (__put_user(0, envp))
		return -EFAULT;
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	sp = (elf_addr_t __user *)envp + 1;
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
284 | 284 | ||
285 | #ifndef elf_map | 285 | #ifndef elf_map |
286 | 286 | ||
287 | static unsigned long elf_map(struct file *filep, unsigned long addr, | 287 | static unsigned long elf_map(struct file *filep, unsigned long addr, |
288 | struct elf_phdr *eppnt, int prot, int type) | 288 | struct elf_phdr *eppnt, int prot, int type) |
289 | { | 289 | { |
290 | unsigned long map_addr; | 290 | unsigned long map_addr; |
291 | unsigned long pageoffset = ELF_PAGEOFFSET(eppnt->p_vaddr); | 291 | unsigned long pageoffset = ELF_PAGEOFFSET(eppnt->p_vaddr); |
292 | 292 | ||
293 | down_write(¤t->mm->mmap_sem); | 293 | down_write(¤t->mm->mmap_sem); |
294 | /* mmap() will return -EINVAL if given a zero size, but a | 294 | /* mmap() will return -EINVAL if given a zero size, but a |
295 | * segment with zero filesize is perfectly valid */ | 295 | * segment with zero filesize is perfectly valid */ |
296 | if (eppnt->p_filesz + pageoffset) | 296 | if (eppnt->p_filesz + pageoffset) |
297 | map_addr = do_mmap(filep, ELF_PAGESTART(addr), | 297 | map_addr = do_mmap(filep, ELF_PAGESTART(addr), |
298 | eppnt->p_filesz + pageoffset, prot, type, | 298 | eppnt->p_filesz + pageoffset, prot, type, |
299 | eppnt->p_offset - pageoffset); | 299 | eppnt->p_offset - pageoffset); |
300 | else | 300 | else |
301 | map_addr = ELF_PAGESTART(addr); | 301 | map_addr = ELF_PAGESTART(addr); |
302 | up_write(¤t->mm->mmap_sem); | 302 | up_write(¤t->mm->mmap_sem); |
303 | return(map_addr); | 303 | return(map_addr); |
304 | } | 304 | } |
305 | 305 | ||
306 | #endif /* !elf_map */ | 306 | #endif /* !elf_map */ |
307 | 307 | ||
308 | /* This is much more generalized than the library routine read function, | 308 | /* This is much more generalized than the library routine read function, |
309 | so we keep this separate. Technically the library read function | 309 | so we keep this separate. Technically the library read function |
310 | is only provided so that we can read a.out libraries that have | 310 | is only provided so that we can read a.out libraries that have |
311 | an ELF header */ | 311 | an ELF header */ |
312 | 312 | ||
/*
 * Map the ELF interpreter (typically the dynamic linker) described by
 * interp_elf_ex into the current address space: validate its headers,
 * mmap every PT_LOAD segment, zero the partial page after the file data
 * and extend the mapping to cover the whole bss.
 *
 * On success stores the interpreter's load bias in *interp_load_addr and
 * returns its (biased) entry point.  On failure returns a value for
 * which BAD_ADDR() is true, or a negative error code.
 */
static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter, unsigned long *interp_load_addr)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	int retval, i, size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	/* Sanity-bound e_phnum so the kmalloc below stays small. */
	if (interp_elf_ex->e_phnum < 1 ||
	    interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */
	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
			     (char *)elf_phdata,size);
	error = -EIO;
	if (retval != size) {
		if (retval < 0)
			error = retval;
		goto out_close;
	}

	eppnt = elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			/* Translate PF_* segment flags into mmap PROT_* bits. */
			if (eppnt->p_flags & PF_R)
				elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W)
				elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X)
				elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			/*
			 * ET_EXEC segments go at their fixed address; once
			 * the first ET_DYN segment has chosen a bias, all
			 * later segments are placed relative to it.
			 */
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;

			map_addr = elf_map(interpreter, load_addr + vaddr,
					   eppnt, elf_prot, elf_type);
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsize so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out_close;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}

	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_close;
	}

	/* What we have mapped so far */
	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);

	/* Map the last of the bss segment */
	if (last_bss > elf_bss) {
		down_write(&current->mm->mmap_sem);
		error = do_brk(elf_bss, last_bss - elf_bss);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(error))
			goto out_close;
	}

	*interp_load_addr = load_addr;
	/* Entry point is relative to the load bias for ET_DYN interpreters. */
	error = ((unsigned long)interp_elf_ex->e_entry) + load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}
453 | 453 | ||
454 | static unsigned long load_aout_interp(struct exec *interp_ex, | 454 | static unsigned long load_aout_interp(struct exec *interp_ex, |
455 | struct file *interpreter) | 455 | struct file *interpreter) |
456 | { | 456 | { |
457 | unsigned long text_data, elf_entry = ~0UL; | 457 | unsigned long text_data, elf_entry = ~0UL; |
458 | char __user * addr; | 458 | char __user * addr; |
459 | loff_t offset; | 459 | loff_t offset; |
460 | 460 | ||
461 | current->mm->end_code = interp_ex->a_text; | 461 | current->mm->end_code = interp_ex->a_text; |
462 | text_data = interp_ex->a_text + interp_ex->a_data; | 462 | text_data = interp_ex->a_text + interp_ex->a_data; |
463 | current->mm->end_data = text_data; | 463 | current->mm->end_data = text_data; |
464 | current->mm->brk = interp_ex->a_bss + text_data; | 464 | current->mm->brk = interp_ex->a_bss + text_data; |
465 | 465 | ||
466 | switch (N_MAGIC(*interp_ex)) { | 466 | switch (N_MAGIC(*interp_ex)) { |
467 | case OMAGIC: | 467 | case OMAGIC: |
468 | offset = 32; | 468 | offset = 32; |
469 | addr = (char __user *)0; | 469 | addr = (char __user *)0; |
470 | break; | 470 | break; |
471 | case ZMAGIC: | 471 | case ZMAGIC: |
472 | case QMAGIC: | 472 | case QMAGIC: |
473 | offset = N_TXTOFF(*interp_ex); | 473 | offset = N_TXTOFF(*interp_ex); |
474 | addr = (char __user *)N_TXTADDR(*interp_ex); | 474 | addr = (char __user *)N_TXTADDR(*interp_ex); |
475 | break; | 475 | break; |
476 | default: | 476 | default: |
477 | goto out; | 477 | goto out; |
478 | } | 478 | } |
479 | 479 | ||
480 | down_write(¤t->mm->mmap_sem); | 480 | down_write(¤t->mm->mmap_sem); |
481 | do_brk(0, text_data); | 481 | do_brk(0, text_data); |
482 | up_write(¤t->mm->mmap_sem); | 482 | up_write(¤t->mm->mmap_sem); |
483 | if (!interpreter->f_op || !interpreter->f_op->read) | 483 | if (!interpreter->f_op || !interpreter->f_op->read) |
484 | goto out; | 484 | goto out; |
485 | if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0) | 485 | if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0) |
486 | goto out; | 486 | goto out; |
487 | flush_icache_range((unsigned long)addr, | 487 | flush_icache_range((unsigned long)addr, |
488 | (unsigned long)addr + text_data); | 488 | (unsigned long)addr + text_data); |
489 | 489 | ||
490 | down_write(¤t->mm->mmap_sem); | 490 | down_write(¤t->mm->mmap_sem); |
491 | do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1), | 491 | do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1), |
492 | interp_ex->a_bss); | 492 | interp_ex->a_bss); |
493 | up_write(¤t->mm->mmap_sem); | 493 | up_write(¤t->mm->mmap_sem); |
494 | elf_entry = interp_ex->a_entry; | 494 | elf_entry = interp_ex->a_entry; |
495 | 495 | ||
496 | out: | 496 | out: |
497 | return elf_entry; | 497 | return elf_entry; |
498 | } | 498 | } |
499 | 499 | ||
500 | /* | 500 | /* |
501 | * These are the functions used to load ELF style executables and shared | 501 | * These are the functions used to load ELF style executables and shared |
502 | * libraries. There is no binary dependent code anywhere else. | 502 | * libraries. There is no binary dependent code anywhere else. |
503 | */ | 503 | */ |
504 | 504 | ||
505 | #define INTERPRETER_NONE 0 | 505 | #define INTERPRETER_NONE 0 |
506 | #define INTERPRETER_AOUT 1 | 506 | #define INTERPRETER_AOUT 1 |
507 | #define INTERPRETER_ELF 2 | 507 | #define INTERPRETER_ELF 2 |
508 | 508 | ||
509 | #ifndef STACK_RND_MASK | 509 | #ifndef STACK_RND_MASK |
510 | #define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */ | 510 | #define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */ |
511 | #endif | 511 | #endif |
512 | 512 | ||
513 | static unsigned long randomize_stack_top(unsigned long stack_top) | 513 | static unsigned long randomize_stack_top(unsigned long stack_top) |
514 | { | 514 | { |
515 | unsigned int random_variable = 0; | 515 | unsigned int random_variable = 0; |
516 | 516 | ||
517 | if ((current->flags & PF_RANDOMIZE) && | 517 | if ((current->flags & PF_RANDOMIZE) && |
518 | !(current->personality & ADDR_NO_RANDOMIZE)) { | 518 | !(current->personality & ADDR_NO_RANDOMIZE)) { |
519 | random_variable = get_random_int() & STACK_RND_MASK; | 519 | random_variable = get_random_int() & STACK_RND_MASK; |
520 | random_variable <<= PAGE_SHIFT; | 520 | random_variable <<= PAGE_SHIFT; |
521 | } | 521 | } |
522 | #ifdef CONFIG_STACK_GROWSUP | 522 | #ifdef CONFIG_STACK_GROWSUP |
523 | return PAGE_ALIGN(stack_top) + random_variable; | 523 | return PAGE_ALIGN(stack_top) + random_variable; |
524 | #else | 524 | #else |
525 | return PAGE_ALIGN(stack_top) - random_variable; | 525 | return PAGE_ALIGN(stack_top) - random_variable; |
526 | #endif | 526 | #endif |
527 | } | 527 | } |
528 | 528 | ||
529 | static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) | 529 | static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) |
530 | { | 530 | { |
531 | struct file *interpreter = NULL; /* to shut gcc up */ | 531 | struct file *interpreter = NULL; /* to shut gcc up */ |
532 | unsigned long load_addr = 0, load_bias = 0; | 532 | unsigned long load_addr = 0, load_bias = 0; |
533 | int load_addr_set = 0; | 533 | int load_addr_set = 0; |
534 | char * elf_interpreter = NULL; | 534 | char * elf_interpreter = NULL; |
535 | unsigned int interpreter_type = INTERPRETER_NONE; | 535 | unsigned int interpreter_type = INTERPRETER_NONE; |
536 | unsigned char ibcs2_interpreter = 0; | 536 | unsigned char ibcs2_interpreter = 0; |
537 | unsigned long error; | 537 | unsigned long error; |
538 | struct elf_phdr *elf_ppnt, *elf_phdata; | 538 | struct elf_phdr *elf_ppnt, *elf_phdata; |
539 | unsigned long elf_bss, elf_brk; | 539 | unsigned long elf_bss, elf_brk; |
540 | int elf_exec_fileno; | 540 | int elf_exec_fileno; |
541 | int retval, i; | 541 | int retval, i; |
542 | unsigned int size; | 542 | unsigned int size; |
543 | unsigned long elf_entry, interp_load_addr = 0; | 543 | unsigned long elf_entry, interp_load_addr = 0; |
544 | unsigned long start_code, end_code, start_data, end_data; | 544 | unsigned long start_code, end_code, start_data, end_data; |
545 | unsigned long reloc_func_desc = 0; | 545 | unsigned long reloc_func_desc = 0; |
546 | char passed_fileno[6]; | 546 | char passed_fileno[6]; |
547 | struct files_struct *files; | 547 | struct files_struct *files; |
548 | int executable_stack = EXSTACK_DEFAULT; | 548 | int executable_stack = EXSTACK_DEFAULT; |
549 | unsigned long def_flags = 0; | 549 | unsigned long def_flags = 0; |
550 | struct { | 550 | struct { |
551 | struct elfhdr elf_ex; | 551 | struct elfhdr elf_ex; |
552 | struct elfhdr interp_elf_ex; | 552 | struct elfhdr interp_elf_ex; |
553 | struct exec interp_ex; | 553 | struct exec interp_ex; |
554 | } *loc; | 554 | } *loc; |
555 | 555 | ||
556 | loc = kmalloc(sizeof(*loc), GFP_KERNEL); | 556 | loc = kmalloc(sizeof(*loc), GFP_KERNEL); |
557 | if (!loc) { | 557 | if (!loc) { |
558 | retval = -ENOMEM; | 558 | retval = -ENOMEM; |
559 | goto out_ret; | 559 | goto out_ret; |
560 | } | 560 | } |
561 | 561 | ||
562 | /* Get the exec-header */ | 562 | /* Get the exec-header */ |
563 | loc->elf_ex = *((struct elfhdr *)bprm->buf); | 563 | loc->elf_ex = *((struct elfhdr *)bprm->buf); |
564 | 564 | ||
565 | retval = -ENOEXEC; | 565 | retval = -ENOEXEC; |
566 | /* First of all, some simple consistency checks */ | 566 | /* First of all, some simple consistency checks */ |
567 | if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0) | 567 | if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0) |
568 | goto out; | 568 | goto out; |
569 | 569 | ||
570 | if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN) | 570 | if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN) |
571 | goto out; | 571 | goto out; |
572 | if (!elf_check_arch(&loc->elf_ex)) | 572 | if (!elf_check_arch(&loc->elf_ex)) |
573 | goto out; | 573 | goto out; |
574 | if (!bprm->file->f_op||!bprm->file->f_op->mmap) | 574 | if (!bprm->file->f_op||!bprm->file->f_op->mmap) |
575 | goto out; | 575 | goto out; |
576 | 576 | ||
577 | /* Now read in all of the header information */ | 577 | /* Now read in all of the header information */ |
578 | if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr)) | 578 | if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr)) |
579 | goto out; | 579 | goto out; |
580 | if (loc->elf_ex.e_phnum < 1 || | 580 | if (loc->elf_ex.e_phnum < 1 || |
581 | loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr)) | 581 | loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr)) |
582 | goto out; | 582 | goto out; |
583 | size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr); | 583 | size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr); |
584 | retval = -ENOMEM; | 584 | retval = -ENOMEM; |
585 | elf_phdata = kmalloc(size, GFP_KERNEL); | 585 | elf_phdata = kmalloc(size, GFP_KERNEL); |
586 | if (!elf_phdata) | 586 | if (!elf_phdata) |
587 | goto out; | 587 | goto out; |
588 | 588 | ||
589 | retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, | 589 | retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, |
590 | (char *)elf_phdata, size); | 590 | (char *)elf_phdata, size); |
591 | if (retval != size) { | 591 | if (retval != size) { |
592 | if (retval >= 0) | 592 | if (retval >= 0) |
593 | retval = -EIO; | 593 | retval = -EIO; |
594 | goto out_free_ph; | 594 | goto out_free_ph; |
595 | } | 595 | } |
596 | 596 | ||
597 | files = current->files; /* Refcounted so ok */ | 597 | files = current->files; /* Refcounted so ok */ |
598 | retval = unshare_files(); | 598 | retval = unshare_files(); |
599 | if (retval < 0) | 599 | if (retval < 0) |
600 | goto out_free_ph; | 600 | goto out_free_ph; |
601 | if (files == current->files) { | 601 | if (files == current->files) { |
602 | put_files_struct(files); | 602 | put_files_struct(files); |
603 | files = NULL; | 603 | files = NULL; |
604 | } | 604 | } |
605 | 605 | ||
606 | /* exec will make our files private anyway, but for the a.out | 606 | /* exec will make our files private anyway, but for the a.out |
607 | loader stuff we need to do it earlier */ | 607 | loader stuff we need to do it earlier */ |
608 | retval = get_unused_fd(); | 608 | retval = get_unused_fd(); |
609 | if (retval < 0) | 609 | if (retval < 0) |
610 | goto out_free_fh; | 610 | goto out_free_fh; |
611 | get_file(bprm->file); | 611 | get_file(bprm->file); |
612 | fd_install(elf_exec_fileno = retval, bprm->file); | 612 | fd_install(elf_exec_fileno = retval, bprm->file); |
613 | 613 | ||
614 | elf_ppnt = elf_phdata; | 614 | elf_ppnt = elf_phdata; |
615 | elf_bss = 0; | 615 | elf_bss = 0; |
616 | elf_brk = 0; | 616 | elf_brk = 0; |
617 | 617 | ||
618 | start_code = ~0UL; | 618 | start_code = ~0UL; |
619 | end_code = 0; | 619 | end_code = 0; |
620 | start_data = 0; | 620 | start_data = 0; |
621 | end_data = 0; | 621 | end_data = 0; |
622 | 622 | ||
623 | for (i = 0; i < loc->elf_ex.e_phnum; i++) { | 623 | for (i = 0; i < loc->elf_ex.e_phnum; i++) { |
624 | if (elf_ppnt->p_type == PT_INTERP) { | 624 | if (elf_ppnt->p_type == PT_INTERP) { |
625 | /* This is the program interpreter used for | 625 | /* This is the program interpreter used for |
626 | * shared libraries - for now assume that this | 626 | * shared libraries - for now assume that this |
627 | * is an a.out format binary | 627 | * is an a.out format binary |
628 | */ | 628 | */ |
629 | retval = -ENOEXEC; | 629 | retval = -ENOEXEC; |
630 | if (elf_ppnt->p_filesz > PATH_MAX || | 630 | if (elf_ppnt->p_filesz > PATH_MAX || |
631 | elf_ppnt->p_filesz < 2) | 631 | elf_ppnt->p_filesz < 2) |
632 | goto out_free_file; | 632 | goto out_free_file; |
633 | 633 | ||
634 | retval = -ENOMEM; | 634 | retval = -ENOMEM; |
635 | elf_interpreter = kmalloc(elf_ppnt->p_filesz, | 635 | elf_interpreter = kmalloc(elf_ppnt->p_filesz, |
636 | GFP_KERNEL); | 636 | GFP_KERNEL); |
637 | if (!elf_interpreter) | 637 | if (!elf_interpreter) |
638 | goto out_free_file; | 638 | goto out_free_file; |
639 | 639 | ||
640 | retval = kernel_read(bprm->file, elf_ppnt->p_offset, | 640 | retval = kernel_read(bprm->file, elf_ppnt->p_offset, |
641 | elf_interpreter, | 641 | elf_interpreter, |
642 | elf_ppnt->p_filesz); | 642 | elf_ppnt->p_filesz); |
643 | if (retval != elf_ppnt->p_filesz) { | 643 | if (retval != elf_ppnt->p_filesz) { |
644 | if (retval >= 0) | 644 | if (retval >= 0) |
645 | retval = -EIO; | 645 | retval = -EIO; |
646 | goto out_free_interp; | 646 | goto out_free_interp; |
647 | } | 647 | } |
648 | /* make sure path is NULL terminated */ | 648 | /* make sure path is NULL terminated */ |
649 | retval = -ENOEXEC; | 649 | retval = -ENOEXEC; |
650 | if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0') | 650 | if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0') |
651 | goto out_free_interp; | 651 | goto out_free_interp; |
652 | 652 | ||
653 | /* If the program interpreter is one of these two, | 653 | /* If the program interpreter is one of these two, |
654 | * then assume an iBCS2 image. Otherwise assume | 654 | * then assume an iBCS2 image. Otherwise assume |
655 | * a native linux image. | 655 | * a native linux image. |
656 | */ | 656 | */ |
657 | if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 || | 657 | if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 || |
658 | strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) | 658 | strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) |
659 | ibcs2_interpreter = 1; | 659 | ibcs2_interpreter = 1; |
660 | 660 | ||
661 | /* | 661 | /* |
662 | * The early SET_PERSONALITY here is so that the lookup | 662 | * The early SET_PERSONALITY here is so that the lookup |
663 | * for the interpreter happens in the namespace of the | 663 | * for the interpreter happens in the namespace of the |
664 | * to-be-execed image. SET_PERSONALITY can select an | 664 | * to-be-execed image. SET_PERSONALITY can select an |
665 | * alternate root. | 665 | * alternate root. |
666 | * | 666 | * |
667 | * However, SET_PERSONALITY is NOT allowed to switch | 667 | * However, SET_PERSONALITY is NOT allowed to switch |
668 | * this task into the new images's memory mapping | 668 | * this task into the new images's memory mapping |
669 | * policy - that is, TASK_SIZE must still evaluate to | 669 | * policy - that is, TASK_SIZE must still evaluate to |
670 | * that which is appropriate to the execing application. | 670 | * that which is appropriate to the execing application. |
671 | * This is because exit_mmap() needs to have TASK_SIZE | 671 | * This is because exit_mmap() needs to have TASK_SIZE |
672 | * evaluate to the size of the old image. | 672 | * evaluate to the size of the old image. |
673 | * | 673 | * |
674 | * So if (say) a 64-bit application is execing a 32-bit | 674 | * So if (say) a 64-bit application is execing a 32-bit |
675 | * application it is the architecture's responsibility | 675 | * application it is the architecture's responsibility |
676 | * to defer changing the value of TASK_SIZE until the | 676 | * to defer changing the value of TASK_SIZE until the |
677 | * switch really is going to happen - do this in | 677 | * switch really is going to happen - do this in |
678 | * flush_thread(). - akpm | 678 | * flush_thread(). - akpm |
679 | */ | 679 | */ |
680 | SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter); | 680 | SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter); |
681 | 681 | ||
682 | interpreter = open_exec(elf_interpreter); | 682 | interpreter = open_exec(elf_interpreter); |
683 | retval = PTR_ERR(interpreter); | 683 | retval = PTR_ERR(interpreter); |
684 | if (IS_ERR(interpreter)) | 684 | if (IS_ERR(interpreter)) |
685 | goto out_free_interp; | 685 | goto out_free_interp; |
686 | 686 | ||
687 | /* | 687 | /* |
688 | * If the binary is not readable then enforce | 688 | * If the binary is not readable then enforce |
689 | * mm->dumpable = 0 regardless of the interpreter's | 689 | * mm->dumpable = 0 regardless of the interpreter's |
690 | * permissions. | 690 | * permissions. |
691 | */ | 691 | */ |
692 | if (file_permission(interpreter, MAY_READ) < 0) | 692 | if (file_permission(interpreter, MAY_READ) < 0) |
693 | bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP; | 693 | bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP; |
694 | 694 | ||
695 | retval = kernel_read(interpreter, 0, bprm->buf, | 695 | retval = kernel_read(interpreter, 0, bprm->buf, |
696 | BINPRM_BUF_SIZE); | 696 | BINPRM_BUF_SIZE); |
697 | if (retval != BINPRM_BUF_SIZE) { | 697 | if (retval != BINPRM_BUF_SIZE) { |
698 | if (retval >= 0) | 698 | if (retval >= 0) |
699 | retval = -EIO; | 699 | retval = -EIO; |
700 | goto out_free_dentry; | 700 | goto out_free_dentry; |
701 | } | 701 | } |
702 | 702 | ||
703 | /* Get the exec headers */ | 703 | /* Get the exec headers */ |
704 | loc->interp_ex = *((struct exec *)bprm->buf); | 704 | loc->interp_ex = *((struct exec *)bprm->buf); |
705 | loc->interp_elf_ex = *((struct elfhdr *)bprm->buf); | 705 | loc->interp_elf_ex = *((struct elfhdr *)bprm->buf); |
706 | break; | 706 | break; |
707 | } | 707 | } |
708 | elf_ppnt++; | 708 | elf_ppnt++; |
709 | } | 709 | } |
710 | 710 | ||
711 | elf_ppnt = elf_phdata; | 711 | elf_ppnt = elf_phdata; |
712 | for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) | 712 | for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) |
713 | if (elf_ppnt->p_type == PT_GNU_STACK) { | 713 | if (elf_ppnt->p_type == PT_GNU_STACK) { |
714 | if (elf_ppnt->p_flags & PF_X) | 714 | if (elf_ppnt->p_flags & PF_X) |
715 | executable_stack = EXSTACK_ENABLE_X; | 715 | executable_stack = EXSTACK_ENABLE_X; |
716 | else | 716 | else |
717 | executable_stack = EXSTACK_DISABLE_X; | 717 | executable_stack = EXSTACK_DISABLE_X; |
718 | break; | 718 | break; |
719 | } | 719 | } |
720 | 720 | ||
721 | /* Some simple consistency checks for the interpreter */ | 721 | /* Some simple consistency checks for the interpreter */ |
722 | if (elf_interpreter) { | 722 | if (elf_interpreter) { |
723 | interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT; | 723 | interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT; |
724 | 724 | ||
725 | /* Now figure out which format our binary is */ | 725 | /* Now figure out which format our binary is */ |
726 | if ((N_MAGIC(loc->interp_ex) != OMAGIC) && | 726 | if ((N_MAGIC(loc->interp_ex) != OMAGIC) && |
727 | (N_MAGIC(loc->interp_ex) != ZMAGIC) && | 727 | (N_MAGIC(loc->interp_ex) != ZMAGIC) && |
728 | (N_MAGIC(loc->interp_ex) != QMAGIC)) | 728 | (N_MAGIC(loc->interp_ex) != QMAGIC)) |
729 | interpreter_type = INTERPRETER_ELF; | 729 | interpreter_type = INTERPRETER_ELF; |
730 | 730 | ||
731 | if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0) | 731 | if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0) |
732 | interpreter_type &= ~INTERPRETER_ELF; | 732 | interpreter_type &= ~INTERPRETER_ELF; |
733 | 733 | ||
734 | retval = -ELIBBAD; | 734 | retval = -ELIBBAD; |
735 | if (!interpreter_type) | 735 | if (!interpreter_type) |
736 | goto out_free_dentry; | 736 | goto out_free_dentry; |
737 | 737 | ||
738 | /* Make sure only one type was selected */ | 738 | /* Make sure only one type was selected */ |
739 | if ((interpreter_type & INTERPRETER_ELF) && | 739 | if ((interpreter_type & INTERPRETER_ELF) && |
740 | interpreter_type != INTERPRETER_ELF) { | 740 | interpreter_type != INTERPRETER_ELF) { |
741 | // FIXME - ratelimit this before re-enabling | 741 | // FIXME - ratelimit this before re-enabling |
742 | // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n"); | 742 | // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n"); |
743 | interpreter_type = INTERPRETER_ELF; | 743 | interpreter_type = INTERPRETER_ELF; |
744 | } | 744 | } |
745 | /* Verify the interpreter has a valid arch */ | 745 | /* Verify the interpreter has a valid arch */ |
746 | if ((interpreter_type == INTERPRETER_ELF) && | 746 | if ((interpreter_type == INTERPRETER_ELF) && |
747 | !elf_check_arch(&loc->interp_elf_ex)) | 747 | !elf_check_arch(&loc->interp_elf_ex)) |
748 | goto out_free_dentry; | 748 | goto out_free_dentry; |
749 | } else { | 749 | } else { |
750 | /* Executables without an interpreter also need a personality */ | 750 | /* Executables without an interpreter also need a personality */ |
751 | SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter); | 751 | SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter); |
752 | } | 752 | } |
753 | 753 | ||
754 | /* OK, we are done with that, now set up the arg stuff, | 754 | /* OK, we are done with that, now set up the arg stuff, |
755 | and then start this sucker up */ | 755 | and then start this sucker up */ |
756 | if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) { | 756 | if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) { |
757 | char *passed_p = passed_fileno; | 757 | char *passed_p = passed_fileno; |
758 | sprintf(passed_fileno, "%d", elf_exec_fileno); | 758 | sprintf(passed_fileno, "%d", elf_exec_fileno); |
759 | 759 | ||
760 | if (elf_interpreter) { | 760 | if (elf_interpreter) { |
761 | retval = copy_strings_kernel(1, &passed_p, bprm); | 761 | retval = copy_strings_kernel(1, &passed_p, bprm); |
762 | if (retval) | 762 | if (retval) |
763 | goto out_free_dentry; | 763 | goto out_free_dentry; |
764 | bprm->argc++; | 764 | bprm->argc++; |
765 | } | 765 | } |
766 | } | 766 | } |
767 | 767 | ||
768 | /* Flush all traces of the currently running executable */ | 768 | /* Flush all traces of the currently running executable */ |
769 | retval = flush_old_exec(bprm); | 769 | retval = flush_old_exec(bprm); |
770 | if (retval) | 770 | if (retval) |
771 | goto out_free_dentry; | 771 | goto out_free_dentry; |
772 | 772 | ||
773 | /* Discard our unneeded old files struct */ | 773 | /* Discard our unneeded old files struct */ |
774 | if (files) { | 774 | if (files) { |
775 | put_files_struct(files); | 775 | put_files_struct(files); |
776 | files = NULL; | 776 | files = NULL; |
777 | } | 777 | } |
778 | 778 | ||
779 | /* OK, This is the point of no return */ | 779 | /* OK, This is the point of no return */ |
780 | current->mm->start_data = 0; | 780 | current->mm->start_data = 0; |
781 | current->mm->end_data = 0; | 781 | current->mm->end_data = 0; |
782 | current->mm->end_code = 0; | 782 | current->mm->end_code = 0; |
783 | current->mm->mmap = NULL; | 783 | current->mm->mmap = NULL; |
784 | current->flags &= ~PF_FORKNOEXEC; | 784 | current->flags &= ~PF_FORKNOEXEC; |
785 | current->mm->def_flags = def_flags; | 785 | current->mm->def_flags = def_flags; |
786 | 786 | ||
787 | /* Do this immediately, since STACK_TOP as used in setup_arg_pages | 787 | /* Do this immediately, since STACK_TOP as used in setup_arg_pages |
788 | may depend on the personality. */ | 788 | may depend on the personality. */ |
789 | SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter); | 789 | SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter); |
790 | if (elf_read_implies_exec(loc->elf_ex, executable_stack)) | 790 | if (elf_read_implies_exec(loc->elf_ex, executable_stack)) |
791 | current->personality |= READ_IMPLIES_EXEC; | 791 | current->personality |= READ_IMPLIES_EXEC; |
792 | 792 | ||
793 | if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) | 793 | if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) |
794 | current->flags |= PF_RANDOMIZE; | 794 | current->flags |= PF_RANDOMIZE; |
795 | arch_pick_mmap_layout(current->mm); | 795 | arch_pick_mmap_layout(current->mm); |
796 | 796 | ||
797 | /* Do this so that we can load the interpreter, if need be. We will | 797 | /* Do this so that we can load the interpreter, if need be. We will |
798 | change some of these later */ | 798 | change some of these later */ |
799 | current->mm->free_area_cache = current->mm->mmap_base; | 799 | current->mm->free_area_cache = current->mm->mmap_base; |
800 | current->mm->cached_hole_size = 0; | 800 | current->mm->cached_hole_size = 0; |
801 | retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP), | 801 | retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP), |
802 | executable_stack); | 802 | executable_stack); |
803 | if (retval < 0) { | 803 | if (retval < 0) { |
804 | send_sig(SIGKILL, current, 0); | 804 | send_sig(SIGKILL, current, 0); |
805 | goto out_free_dentry; | 805 | goto out_free_dentry; |
806 | } | 806 | } |
807 | 807 | ||
808 | current->mm->start_stack = bprm->p; | 808 | current->mm->start_stack = bprm->p; |
809 | 809 | ||
810 | /* Now we do a little grungy work by mmaping the ELF image into | 810 | /* Now we do a little grungy work by mmaping the ELF image into |
811 | the correct location in memory. At this point, we assume that | 811 | the correct location in memory. At this point, we assume that |
812 | the image should be loaded at fixed address, not at a variable | 812 | the image should be loaded at fixed address, not at a variable |
813 | address. */ | 813 | address. */ |
814 | for(i = 0, elf_ppnt = elf_phdata; | 814 | for(i = 0, elf_ppnt = elf_phdata; |
815 | i < loc->elf_ex.e_phnum; i++, elf_ppnt++) { | 815 | i < loc->elf_ex.e_phnum; i++, elf_ppnt++) { |
816 | int elf_prot = 0, elf_flags; | 816 | int elf_prot = 0, elf_flags; |
817 | unsigned long k, vaddr; | 817 | unsigned long k, vaddr; |
818 | 818 | ||
819 | if (elf_ppnt->p_type != PT_LOAD) | 819 | if (elf_ppnt->p_type != PT_LOAD) |
820 | continue; | 820 | continue; |
821 | 821 | ||
822 | if (unlikely (elf_brk > elf_bss)) { | 822 | if (unlikely (elf_brk > elf_bss)) { |
823 | unsigned long nbyte; | 823 | unsigned long nbyte; |
824 | 824 | ||
825 | /* There was a PT_LOAD segment with p_memsz > p_filesz | 825 | /* There was a PT_LOAD segment with p_memsz > p_filesz |
826 | before this one. Map anonymous pages, if needed, | 826 | before this one. Map anonymous pages, if needed, |
827 | and clear the area. */ | 827 | and clear the area. */ |
828 | retval = set_brk (elf_bss + load_bias, | 828 | retval = set_brk (elf_bss + load_bias, |
829 | elf_brk + load_bias); | 829 | elf_brk + load_bias); |
830 | if (retval) { | 830 | if (retval) { |
831 | send_sig(SIGKILL, current, 0); | 831 | send_sig(SIGKILL, current, 0); |
832 | goto out_free_dentry; | 832 | goto out_free_dentry; |
833 | } | 833 | } |
834 | nbyte = ELF_PAGEOFFSET(elf_bss); | 834 | nbyte = ELF_PAGEOFFSET(elf_bss); |
835 | if (nbyte) { | 835 | if (nbyte) { |
836 | nbyte = ELF_MIN_ALIGN - nbyte; | 836 | nbyte = ELF_MIN_ALIGN - nbyte; |
837 | if (nbyte > elf_brk - elf_bss) | 837 | if (nbyte > elf_brk - elf_bss) |
838 | nbyte = elf_brk - elf_bss; | 838 | nbyte = elf_brk - elf_bss; |
839 | if (clear_user((void __user *)elf_bss + | 839 | if (clear_user((void __user *)elf_bss + |
840 | load_bias, nbyte)) { | 840 | load_bias, nbyte)) { |
841 | /* | 841 | /* |
842 | * This bss-zeroing can fail if the ELF | 842 | * This bss-zeroing can fail if the ELF |
843 | * file specifies odd protections. So | 843 | * file specifies odd protections. So |
844 | * we don't check the return value | 844 | * we don't check the return value |
845 | */ | 845 | */ |
846 | } | 846 | } |
847 | } | 847 | } |
848 | } | 848 | } |
849 | 849 | ||
850 | if (elf_ppnt->p_flags & PF_R) | 850 | if (elf_ppnt->p_flags & PF_R) |
851 | elf_prot |= PROT_READ; | 851 | elf_prot |= PROT_READ; |
852 | if (elf_ppnt->p_flags & PF_W) | 852 | if (elf_ppnt->p_flags & PF_W) |
853 | elf_prot |= PROT_WRITE; | 853 | elf_prot |= PROT_WRITE; |
854 | if (elf_ppnt->p_flags & PF_X) | 854 | if (elf_ppnt->p_flags & PF_X) |
855 | elf_prot |= PROT_EXEC; | 855 | elf_prot |= PROT_EXEC; |
856 | 856 | ||
857 | elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE; | 857 | elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE; |
858 | 858 | ||
859 | vaddr = elf_ppnt->p_vaddr; | 859 | vaddr = elf_ppnt->p_vaddr; |
860 | if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) { | 860 | if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) { |
861 | elf_flags |= MAP_FIXED; | 861 | elf_flags |= MAP_FIXED; |
862 | } else if (loc->elf_ex.e_type == ET_DYN) { | 862 | } else if (loc->elf_ex.e_type == ET_DYN) { |
863 | /* Try and get dynamic programs out of the way of the | 863 | /* Try and get dynamic programs out of the way of the |
864 | * default mmap base, as well as whatever program they | 864 | * default mmap base, as well as whatever program they |
865 | * might try to exec. This is because the brk will | 865 | * might try to exec. This is because the brk will |
866 | * follow the loader, and is not movable. */ | 866 | * follow the loader, and is not movable. */ |
867 | load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr); | 867 | load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr); |
868 | } | 868 | } |
869 | 869 | ||
870 | error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, | 870 | error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, |
871 | elf_prot, elf_flags); | 871 | elf_prot, elf_flags); |
872 | if (BAD_ADDR(error)) { | 872 | if (BAD_ADDR(error)) { |
873 | send_sig(SIGKILL, current, 0); | 873 | send_sig(SIGKILL, current, 0); |
874 | goto out_free_dentry; | 874 | goto out_free_dentry; |
875 | } | 875 | } |
876 | 876 | ||
877 | if (!load_addr_set) { | 877 | if (!load_addr_set) { |
878 | load_addr_set = 1; | 878 | load_addr_set = 1; |
879 | load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset); | 879 | load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset); |
880 | if (loc->elf_ex.e_type == ET_DYN) { | 880 | if (loc->elf_ex.e_type == ET_DYN) { |
881 | load_bias += error - | 881 | load_bias += error - |
882 | ELF_PAGESTART(load_bias + vaddr); | 882 | ELF_PAGESTART(load_bias + vaddr); |
883 | load_addr += load_bias; | 883 | load_addr += load_bias; |
884 | reloc_func_desc = load_bias; | 884 | reloc_func_desc = load_bias; |
885 | } | 885 | } |
886 | } | 886 | } |
887 | k = elf_ppnt->p_vaddr; | 887 | k = elf_ppnt->p_vaddr; |
888 | if (k < start_code) | 888 | if (k < start_code) |
889 | start_code = k; | 889 | start_code = k; |
890 | if (start_data < k) | 890 | if (start_data < k) |
891 | start_data = k; | 891 | start_data = k; |
892 | 892 | ||
893 | /* | 893 | /* |
894 | * Check to see if the section's size will overflow the | 894 | * Check to see if the section's size will overflow the |
895 | * allowed task size. Note that p_filesz must always be | 895 | * allowed task size. Note that p_filesz must always be |
896 | * <= p_memsz so it is only necessary to check p_memsz. | 896 | * <= p_memsz so it is only necessary to check p_memsz. |
897 | */ | 897 | */ |
898 | if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz || | 898 | if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz || |
899 | elf_ppnt->p_memsz > TASK_SIZE || | 899 | elf_ppnt->p_memsz > TASK_SIZE || |
900 | TASK_SIZE - elf_ppnt->p_memsz < k) { | 900 | TASK_SIZE - elf_ppnt->p_memsz < k) { |
901 | /* set_brk can never work. Avoid overflows. */ | 901 | /* set_brk can never work. Avoid overflows. */ |
902 | send_sig(SIGKILL, current, 0); | 902 | send_sig(SIGKILL, current, 0); |
903 | goto out_free_dentry; | 903 | goto out_free_dentry; |
904 | } | 904 | } |
905 | 905 | ||
906 | k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz; | 906 | k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz; |
907 | 907 | ||
908 | if (k > elf_bss) | 908 | if (k > elf_bss) |
909 | elf_bss = k; | 909 | elf_bss = k; |
910 | if ((elf_ppnt->p_flags & PF_X) && end_code < k) | 910 | if ((elf_ppnt->p_flags & PF_X) && end_code < k) |
911 | end_code = k; | 911 | end_code = k; |
912 | if (end_data < k) | 912 | if (end_data < k) |
913 | end_data = k; | 913 | end_data = k; |
914 | k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz; | 914 | k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz; |
915 | if (k > elf_brk) | 915 | if (k > elf_brk) |
916 | elf_brk = k; | 916 | elf_brk = k; |
917 | } | 917 | } |
918 | 918 | ||
919 | loc->elf_ex.e_entry += load_bias; | 919 | loc->elf_ex.e_entry += load_bias; |
920 | elf_bss += load_bias; | 920 | elf_bss += load_bias; |
921 | elf_brk += load_bias; | 921 | elf_brk += load_bias; |
922 | start_code += load_bias; | 922 | start_code += load_bias; |
923 | end_code += load_bias; | 923 | end_code += load_bias; |
924 | start_data += load_bias; | 924 | start_data += load_bias; |
925 | end_data += load_bias; | 925 | end_data += load_bias; |
926 | 926 | ||
927 | /* Calling set_brk effectively mmaps the pages that we need | 927 | /* Calling set_brk effectively mmaps the pages that we need |
928 | * for the bss and break sections. We must do this before | 928 | * for the bss and break sections. We must do this before |
929 | * mapping in the interpreter, to make sure it doesn't wind | 929 | * mapping in the interpreter, to make sure it doesn't wind |
930 | * up getting placed where the bss needs to go. | 930 | * up getting placed where the bss needs to go. |
931 | */ | 931 | */ |
932 | retval = set_brk(elf_bss, elf_brk); | 932 | retval = set_brk(elf_bss, elf_brk); |
933 | if (retval) { | 933 | if (retval) { |
934 | send_sig(SIGKILL, current, 0); | 934 | send_sig(SIGKILL, current, 0); |
935 | goto out_free_dentry; | 935 | goto out_free_dentry; |
936 | } | 936 | } |
937 | if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) { | 937 | if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) { |
938 | send_sig(SIGSEGV, current, 0); | 938 | send_sig(SIGSEGV, current, 0); |
939 | retval = -EFAULT; /* Nobody gets to see this, but.. */ | 939 | retval = -EFAULT; /* Nobody gets to see this, but.. */ |
940 | goto out_free_dentry; | 940 | goto out_free_dentry; |
941 | } | 941 | } |
942 | 942 | ||
943 | if (elf_interpreter) { | 943 | if (elf_interpreter) { |
944 | if (interpreter_type == INTERPRETER_AOUT) | 944 | if (interpreter_type == INTERPRETER_AOUT) |
945 | elf_entry = load_aout_interp(&loc->interp_ex, | 945 | elf_entry = load_aout_interp(&loc->interp_ex, |
946 | interpreter); | 946 | interpreter); |
947 | else | 947 | else |
948 | elf_entry = load_elf_interp(&loc->interp_elf_ex, | 948 | elf_entry = load_elf_interp(&loc->interp_elf_ex, |
949 | interpreter, | 949 | interpreter, |
950 | &interp_load_addr); | 950 | &interp_load_addr); |
951 | if (BAD_ADDR(elf_entry)) { | 951 | if (BAD_ADDR(elf_entry)) { |
952 | force_sig(SIGSEGV, current); | 952 | force_sig(SIGSEGV, current); |
953 | retval = IS_ERR((void *)elf_entry) ? | 953 | retval = IS_ERR((void *)elf_entry) ? |
954 | (int)elf_entry : -EINVAL; | 954 | (int)elf_entry : -EINVAL; |
955 | goto out_free_dentry; | 955 | goto out_free_dentry; |
956 | } | 956 | } |
957 | reloc_func_desc = interp_load_addr; | 957 | reloc_func_desc = interp_load_addr; |
958 | 958 | ||
959 | allow_write_access(interpreter); | 959 | allow_write_access(interpreter); |
960 | fput(interpreter); | 960 | fput(interpreter); |
961 | kfree(elf_interpreter); | 961 | kfree(elf_interpreter); |
962 | } else { | 962 | } else { |
963 | elf_entry = loc->elf_ex.e_entry; | 963 | elf_entry = loc->elf_ex.e_entry; |
964 | if (BAD_ADDR(elf_entry)) { | 964 | if (BAD_ADDR(elf_entry)) { |
965 | force_sig(SIGSEGV, current); | 965 | force_sig(SIGSEGV, current); |
966 | retval = -EINVAL; | 966 | retval = -EINVAL; |
967 | goto out_free_dentry; | 967 | goto out_free_dentry; |
968 | } | 968 | } |
969 | } | 969 | } |
970 | 970 | ||
971 | kfree(elf_phdata); | 971 | kfree(elf_phdata); |
972 | 972 | ||
973 | if (interpreter_type != INTERPRETER_AOUT) | 973 | if (interpreter_type != INTERPRETER_AOUT) |
974 | sys_close(elf_exec_fileno); | 974 | sys_close(elf_exec_fileno); |
975 | 975 | ||
976 | set_binfmt(&elf_format); | 976 | set_binfmt(&elf_format); |
977 | 977 | ||
978 | #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES | 978 | #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES |
979 | retval = arch_setup_additional_pages(bprm, executable_stack); | 979 | retval = arch_setup_additional_pages(bprm, executable_stack); |
980 | if (retval < 0) { | 980 | if (retval < 0) { |
981 | send_sig(SIGKILL, current, 0); | 981 | send_sig(SIGKILL, current, 0); |
982 | goto out; | 982 | goto out; |
983 | } | 983 | } |
984 | #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */ | 984 | #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */ |
985 | 985 | ||
986 | compute_creds(bprm); | 986 | compute_creds(bprm); |
987 | current->flags &= ~PF_FORKNOEXEC; | 987 | current->flags &= ~PF_FORKNOEXEC; |
988 | create_elf_tables(bprm, &loc->elf_ex, | 988 | create_elf_tables(bprm, &loc->elf_ex, |
989 | (interpreter_type == INTERPRETER_AOUT), | 989 | (interpreter_type == INTERPRETER_AOUT), |
990 | load_addr, interp_load_addr); | 990 | load_addr, interp_load_addr); |
991 | /* N.B. passed_fileno might not be initialized? */ | 991 | /* N.B. passed_fileno might not be initialized? */ |
992 | if (interpreter_type == INTERPRETER_AOUT) | 992 | if (interpreter_type == INTERPRETER_AOUT) |
993 | current->mm->arg_start += strlen(passed_fileno) + 1; | 993 | current->mm->arg_start += strlen(passed_fileno) + 1; |
994 | current->mm->end_code = end_code; | 994 | current->mm->end_code = end_code; |
995 | current->mm->start_code = start_code; | 995 | current->mm->start_code = start_code; |
996 | current->mm->start_data = start_data; | 996 | current->mm->start_data = start_data; |
997 | current->mm->end_data = end_data; | 997 | current->mm->end_data = end_data; |
998 | current->mm->start_stack = bprm->p; | 998 | current->mm->start_stack = bprm->p; |
999 | 999 | ||
1000 | if (current->personality & MMAP_PAGE_ZERO) { | 1000 | if (current->personality & MMAP_PAGE_ZERO) { |
1001 | /* Why this, you ask??? Well SVr4 maps page 0 as read-only, | 1001 | /* Why this, you ask??? Well SVr4 maps page 0 as read-only, |
1002 | and some applications "depend" upon this behavior. | 1002 | and some applications "depend" upon this behavior. |
1003 | Since we do not have the power to recompile these, we | 1003 | Since we do not have the power to recompile these, we |
1004 | emulate the SVr4 behavior. Sigh. */ | 1004 | emulate the SVr4 behavior. Sigh. */ |
1005 | down_write(¤t->mm->mmap_sem); | 1005 | down_write(¤t->mm->mmap_sem); |
1006 | error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC, | 1006 | error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC, |
1007 | MAP_FIXED | MAP_PRIVATE, 0); | 1007 | MAP_FIXED | MAP_PRIVATE, 0); |
1008 | up_write(¤t->mm->mmap_sem); | 1008 | up_write(¤t->mm->mmap_sem); |
1009 | } | 1009 | } |
1010 | 1010 | ||
1011 | #ifdef ELF_PLAT_INIT | 1011 | #ifdef ELF_PLAT_INIT |
1012 | /* | 1012 | /* |
1013 | * The ABI may specify that certain registers be set up in special | 1013 | * The ABI may specify that certain registers be set up in special |
1014 | * ways (on i386 %edx is the address of a DT_FINI function, for | 1014 | * ways (on i386 %edx is the address of a DT_FINI function, for |
1015 | * example. In addition, it may also specify (eg, PowerPC64 ELF) | 1015 | * example. In addition, it may also specify (eg, PowerPC64 ELF) |
1016 | * that the e_entry field is the address of the function descriptor | 1016 | * that the e_entry field is the address of the function descriptor |
1017 | * for the startup routine, rather than the address of the startup | 1017 | * for the startup routine, rather than the address of the startup |
1018 | * routine itself. This macro performs whatever initialization to | 1018 | * routine itself. This macro performs whatever initialization to |
1019 | * the regs structure is required as well as any relocations to the | 1019 | * the regs structure is required as well as any relocations to the |
1020 | * function descriptor entries when executing dynamically links apps. | 1020 | * function descriptor entries when executing dynamically links apps. |
1021 | */ | 1021 | */ |
1022 | ELF_PLAT_INIT(regs, reloc_func_desc); | 1022 | ELF_PLAT_INIT(regs, reloc_func_desc); |
1023 | #endif | 1023 | #endif |
1024 | 1024 | ||
1025 | start_thread(regs, elf_entry, bprm->p); | 1025 | start_thread(regs, elf_entry, bprm->p); |
1026 | if (unlikely(current->ptrace & PT_PTRACED)) { | 1026 | if (unlikely(current->ptrace & PT_PTRACED)) { |
1027 | if (current->ptrace & PT_TRACE_EXEC) | 1027 | if (current->ptrace & PT_TRACE_EXEC) |
1028 | ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP); | 1028 | ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP); |
1029 | else | 1029 | else |
1030 | send_sig(SIGTRAP, current, 0); | 1030 | send_sig(SIGTRAP, current, 0); |
1031 | } | 1031 | } |
1032 | retval = 0; | 1032 | retval = 0; |
1033 | out: | 1033 | out: |
1034 | kfree(loc); | 1034 | kfree(loc); |
1035 | out_ret: | 1035 | out_ret: |
1036 | return retval; | 1036 | return retval; |
1037 | 1037 | ||
1038 | /* error cleanup */ | 1038 | /* error cleanup */ |
1039 | out_free_dentry: | 1039 | out_free_dentry: |
1040 | allow_write_access(interpreter); | 1040 | allow_write_access(interpreter); |
1041 | if (interpreter) | 1041 | if (interpreter) |
1042 | fput(interpreter); | 1042 | fput(interpreter); |
1043 | out_free_interp: | 1043 | out_free_interp: |
1044 | kfree(elf_interpreter); | 1044 | kfree(elf_interpreter); |
1045 | out_free_file: | 1045 | out_free_file: |
1046 | sys_close(elf_exec_fileno); | 1046 | sys_close(elf_exec_fileno); |
1047 | out_free_fh: | 1047 | out_free_fh: |
1048 | if (files) | 1048 | if (files) |
1049 | reset_files_struct(current, files); | 1049 | reset_files_struct(current, files); |
1050 | out_free_ph: | 1050 | out_free_ph: |
1051 | kfree(elf_phdata); | 1051 | kfree(elf_phdata); |
1052 | goto out; | 1052 | goto out; |
1053 | } | 1053 | } |
1054 | 1054 | ||
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	/* Read and sanity-check the ELF file header. */
	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
	if (retval != j)
		goto out_free_ph;

	/* The library must contain exactly one PT_LOAD segment. */
	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	/* Advance eppnt to that single PT_LOAD entry. */
	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	up_write(&current->mm->mmap_sem);
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	/* Zero the tail of the last file-backed page (start of bss). */
	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	/* Extend with anonymous memory if p_memsz exceeds the mapped span. */
	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
			    ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len) {
		down_write(&current->mm->mmap_sem);
		do_brk(len, bss - len);
		up_write(&current->mm->mmap_sem);
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
1138 | 1138 | ||
1139 | /* | 1139 | /* |
1140 | * Note that some platforms still use traditional core dumps and not | 1140 | * Note that some platforms still use traditional core dumps and not |
1141 | * the ELF core dump. Each platform can select it as appropriate. | 1141 | * the ELF core dump. Each platform can select it as appropriate. |
1142 | */ | 1142 | */ |
1143 | #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) | 1143 | #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) |
1144 | 1144 | ||
1145 | /* | 1145 | /* |
1146 | * ELF core dumper | 1146 | * ELF core dumper |
1147 | * | 1147 | * |
1148 | * Modelled on fs/exec.c:aout_core_dump() | 1148 | * Modelled on fs/exec.c:aout_core_dump() |
1149 | * Jeremy Fitzhardinge <jeremy@sw.oz.au> | 1149 | * Jeremy Fitzhardinge <jeremy@sw.oz.au> |
1150 | */ | 1150 | */ |
1151 | /* | 1151 | /* |
1152 | * These are the only things you should do on a core-file: use only these | 1152 | * These are the only things you should do on a core-file: use only these |
1153 | * functions to write out all the necessary info. | 1153 | * functions to write out all the necessary info. |
1154 | */ | 1154 | */ |
1155 | static int dump_write(struct file *file, const void *addr, int nr) | 1155 | static int dump_write(struct file *file, const void *addr, int nr) |
1156 | { | 1156 | { |
1157 | return file->f_op->write(file, addr, nr, &file->f_pos) == nr; | 1157 | return file->f_op->write(file, addr, nr, &file->f_pos) == nr; |
1158 | } | 1158 | } |
1159 | 1159 | ||
1160 | static int dump_seek(struct file *file, loff_t off) | 1160 | static int dump_seek(struct file *file, loff_t off) |
1161 | { | 1161 | { |
1162 | if (file->f_op->llseek && file->f_op->llseek != no_llseek) { | 1162 | if (file->f_op->llseek && file->f_op->llseek != no_llseek) { |
1163 | if (file->f_op->llseek(file, off, SEEK_CUR) < 0) | 1163 | if (file->f_op->llseek(file, off, SEEK_CUR) < 0) |
1164 | return 0; | 1164 | return 0; |
1165 | } else { | 1165 | } else { |
1166 | char *buf = (char *)get_zeroed_page(GFP_KERNEL); | 1166 | char *buf = (char *)get_zeroed_page(GFP_KERNEL); |
1167 | if (!buf) | 1167 | if (!buf) |
1168 | return 0; | 1168 | return 0; |
1169 | while (off > 0) { | 1169 | while (off > 0) { |
1170 | unsigned long n = off; | 1170 | unsigned long n = off; |
1171 | if (n > PAGE_SIZE) | 1171 | if (n > PAGE_SIZE) |
1172 | n = PAGE_SIZE; | 1172 | n = PAGE_SIZE; |
1173 | if (!dump_write(file, buf, n)) | 1173 | if (!dump_write(file, buf, n)) |
1174 | return 0; | 1174 | return 0; |
1175 | off -= n; | 1175 | off -= n; |
1176 | } | 1176 | } |
1177 | free_page((unsigned long)buf); | 1177 | free_page((unsigned long)buf); |
1178 | } | 1178 | } |
1179 | return 1; | 1179 | return 1; |
1180 | } | 1180 | } |
1181 | 1181 | ||
1182 | /* | 1182 | /* |
1183 | * Decide whether a segment is worth dumping; default is yes to be | 1183 | * Decide whether a segment is worth dumping; default is yes to be |
1184 | * sure (missing info is worse than too much; etc). | 1184 | * sure (missing info is worse than too much; etc). |
1185 | * Personally I'd include everything, and use the coredump limit... | 1185 | * Personally I'd include everything, and use the coredump limit... |
1186 | * | 1186 | * |
1187 | * I think we should skip something. But I am not sure how. H.J. | 1187 | * I think we should skip something. But I am not sure how. H.J. |
1188 | */ | 1188 | */ |
static int maydump(struct vm_area_struct *vma)
{
	/* The vma can be set up to tell us the answer directly. */
	if (vma->vm_flags & VM_ALWAYSDUMP)
		return 1;

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & (VM_IO | VM_RESERVED))
		return 0;

	/* Dump shared memory only if mapped from an anonymous file. */
	/* NOTE(review): assumes vm_file is non-NULL for VM_SHARED vmas
	 * reaching this point — verify against callers. */
	if (vma->vm_flags & VM_SHARED)
		return vma->vm_file->f_path.dentry->d_inode->i_nlink == 0;

	/* If it hasn't been written to, don't write it out */
	if (!vma->anon_vma)
		return 0;

	return 1;
}
1209 | 1209 | ||
/* An ELF note in memory */
struct memelfnote
{
	const char *name;	/* note name (e.g. "CORE"), NUL-terminated */
	int type;		/* n_type value (NT_PRSTATUS, ...) */
	unsigned int datasz;	/* size of *data in bytes */
	void *data;		/* payload; not copied, must outlive the note */
};
1218 | 1218 | ||
1219 | static int notesize(struct memelfnote *en) | 1219 | static int notesize(struct memelfnote *en) |
1220 | { | 1220 | { |
1221 | int sz; | 1221 | int sz; |
1222 | 1222 | ||
1223 | sz = sizeof(struct elf_note); | 1223 | sz = sizeof(struct elf_note); |
1224 | sz += roundup(strlen(en->name) + 1, 4); | 1224 | sz += roundup(strlen(en->name) + 1, 4); |
1225 | sz += roundup(en->datasz, 4); | 1225 | sz += roundup(en->datasz, 4); |
1226 | 1226 | ||
1227 | return sz; | 1227 | return sz; |
1228 | } | 1228 | } |
1229 | 1229 | ||
/* Write and account: bail out of the enclosing function (returning 0)
 * on a short write, otherwise advance *foffset by the amount written. */
#define DUMP_WRITE(addr, nr, foffset) \
	do { if (!dump_write(file, (addr), (nr))) return 0; *foffset += (nr); } while(0)

/* Pad the core file out to the next 4-byte boundary with zero bytes. */
static int alignfile(struct file *file, loff_t *foffset)
{
	static const char buf[4] = { 0, };
	DUMP_WRITE(buf, roundup(*foffset, 4) - *foffset, foffset);
	return 1;
}
1239 | 1239 | ||
/*
 * Emit one ELF note record: the elf_note header, the 4-byte-aligned
 * name, then the 4-byte-aligned payload.  Returns 1 on success, 0 on
 * any write failure (via the DUMP_WRITE early return).
 */
static int writenote(struct memelfnote *men, struct file *file,
			loff_t *foffset)
{
	struct elf_note en;
	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en), foffset);
	DUMP_WRITE(men->name, en.n_namesz, foffset);
	if (!alignfile(file, foffset))
		return 0;
	DUMP_WRITE(men->data, men->datasz, foffset);
	if (!alignfile(file, foffset))
		return 0;

	return 1;
}
#undef DUMP_WRITE
1259 | 1259 | ||
/* Core-dump body writers: track the running size against the coredump
 * rlimit and jump to the common cleanup label on overflow or failure.
 * Both expect 'size', 'limit', 'file' and 'end_coredump' in scope. */
#define DUMP_WRITE(addr, nr)	\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off)	\
	if (!dump_seek(file, (off))) \
		goto end_coredump;
1266 | 1266 | ||
/*
 * Initialize an ELF core-file header: ET_CORE type, the current
 * architecture, @segs program headers immediately after the header,
 * and no section table.
 */
static void fill_elf_header(struct elfhdr *elf, int segs)
{
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);	/* phdrs follow directly */
	elf->e_shoff = 0;
	elf->e_flags = ELF_CORE_EFLAGS;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;
	return;
}
1291 | 1291 | ||
/*
 * Describe the PT_NOTE segment: only the file offset and size are
 * meaningful; notes occupy no memory in the dumped image.
 */
static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
	return;
}
1304 | 1304 | ||
/*
 * Record a note's fields.  No data is copied: @name and @data are
 * referenced later by writenote(), so they must outlive the note.
 */
static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
	return;
}
1314 | 1314 | ||
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	/* Only the first word of the pending/blocked signal sets is kept. */
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = p->pid;
	prstatus->pr_ppid = p->parent->pid;
	prstatus->pr_pgrp = process_group(p);
	prstatus->pr_sid = process_session(p);
	if (thread_group_leader(p)) {
		/*
		 * This is the record for the group leader.  Add in the
		 * cumulative times of previous dead threads.  This total
		 * won't include the time of each live thread whose state
		 * is included in the core dump.  The final total reported
		 * to our parent process when it calls wait4 will include
		 * those sums as well as the little bit more time it takes
		 * this and each other thread to finish dying after the
		 * core dump synchronization phase.
		 */
		cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
				   &prstatus->pr_utime);
		cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
				   &prstatus->pr_stime);
	} else {
		cputime_to_timeval(p->utime, &prstatus->pr_utime);
		cputime_to_timeval(p->stime, &prstatus->pr_stime);
	}
	cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
	cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}
1351 | 1351 | ||
/*
 * Fill the NT_PRPSINFO record for task @p: command line (truncated to
 * ELF_PRARGSZ), ids, scheduling state and credentials.
 * Returns 0 on success, -EFAULT if the argument area can't be read.
 */
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
		           (const char __user *)mm->arg_start, len))
		return -EFAULT;
	/* Arguments are NUL-separated in memory; join them with spaces. */
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = p->pid;
	psinfo->pr_ppid = p->parent->pid;
	psinfo->pr_pgrp = process_group(p);
	psinfo->pr_sid = process_session(p);

	/* Encode task state as an index, then as a one-letter code. */
	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	SET_UID(psinfo->pr_uid, p->uid);
	SET_GID(psinfo->pr_gid, p->gid);
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}
1388 | 1388 | ||
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;		/* link in the dump's thread list */
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;	/* the thread being described */
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* NT_PRXFPREG */
#endif
	struct memelfnote notes[3];	/* up to prstatus + fpu + xfpu */
	int num_notes;			/* how many of notes[] are valid */
};
1402 | 1402 | ||
1403 | /* | 1403 | /* |
1404 | * In order to add the specific thread information for the elf file format, | 1404 | * In order to add the specific thread information for the elf file format, |
1405 | * we need to keep a linked list of every threads pr_status and then create | 1405 | * we need to keep a linked list of every threads pr_status and then create |
1406 | * a single section for them in the final core file. | 1406 | * a single section for them in the final core file. |
1407 | */ | 1407 | */ |
1408 | static int elf_dump_thread_status(long signr, struct elf_thread_status *t) | 1408 | static int elf_dump_thread_status(long signr, struct elf_thread_status *t) |
1409 | { | 1409 | { |
1410 | int sz = 0; | 1410 | int sz = 0; |
1411 | struct task_struct *p = t->thread; | 1411 | struct task_struct *p = t->thread; |
1412 | t->num_notes = 0; | 1412 | t->num_notes = 0; |
1413 | 1413 | ||
1414 | fill_prstatus(&t->prstatus, p, signr); | 1414 | fill_prstatus(&t->prstatus, p, signr); |
1415 | elf_core_copy_task_regs(p, &t->prstatus.pr_reg); | 1415 | elf_core_copy_task_regs(p, &t->prstatus.pr_reg); |
1416 | 1416 | ||
1417 | fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), | 1417 | fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), |
1418 | &(t->prstatus)); | 1418 | &(t->prstatus)); |
1419 | t->num_notes++; | 1419 | t->num_notes++; |
1420 | sz += notesize(&t->notes[0]); | 1420 | sz += notesize(&t->notes[0]); |
1421 | 1421 | ||
1422 | if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, | 1422 | if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, |
1423 | &t->fpu))) { | 1423 | &t->fpu))) { |
1424 | fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), | 1424 | fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), |
1425 | &(t->fpu)); | 1425 | &(t->fpu)); |
1426 | t->num_notes++; | 1426 | t->num_notes++; |
1427 | sz += notesize(&t->notes[1]); | 1427 | sz += notesize(&t->notes[1]); |
1428 | } | 1428 | } |
1429 | 1429 | ||
1430 | #ifdef ELF_CORE_COPY_XFPREGS | 1430 | #ifdef ELF_CORE_COPY_XFPREGS |
1431 | if (elf_core_copy_task_xfpregs(p, &t->xfpu)) { | 1431 | if (elf_core_copy_task_xfpregs(p, &t->xfpu)) { |
1432 | fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), | 1432 | fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), |
1433 | &t->xfpu); | 1433 | &t->xfpu); |
1434 | t->num_notes++; | 1434 | t->num_notes++; |
1435 | sz += notesize(&t->notes[2]); | 1435 | sz += notesize(&t->notes[2]); |
1436 | } | 1436 | } |
1437 | #endif | 1437 | #endif |
1438 | return sz; | 1438 | return sz; |
1439 | } | 1439 | } |
1440 | 1440 | ||
1441 | static struct vm_area_struct *first_vma(struct task_struct *tsk, | 1441 | static struct vm_area_struct *first_vma(struct task_struct *tsk, |
1442 | struct vm_area_struct *gate_vma) | 1442 | struct vm_area_struct *gate_vma) |
1443 | { | 1443 | { |
1444 | struct vm_area_struct *ret = tsk->mm->mmap; | 1444 | struct vm_area_struct *ret = tsk->mm->mmap; |
1445 | 1445 | ||
1446 | if (ret) | 1446 | if (ret) |
1447 | return ret; | 1447 | return ret; |
1448 | return gate_vma; | 1448 | return gate_vma; |
1449 | } | 1449 | } |
1450 | /* | 1450 | /* |
1451 | * Helper function for iterating across a vma list. It ensures that the caller | 1451 | * Helper function for iterating across a vma list. It ensures that the caller |
1452 | * will visit `gate_vma' prior to terminating the search. | 1452 | * will visit `gate_vma' prior to terminating the search. |
1453 | */ | 1453 | */ |
1454 | static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma, | 1454 | static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma, |
1455 | struct vm_area_struct *gate_vma) | 1455 | struct vm_area_struct *gate_vma) |
1456 | { | 1456 | { |
1457 | struct vm_area_struct *ret; | 1457 | struct vm_area_struct *ret; |
1458 | 1458 | ||
1459 | ret = this_vma->vm_next; | 1459 | ret = this_vma->vm_next; |
1460 | if (ret) | 1460 | if (ret) |
1461 | return ret; | 1461 | return ret; |
1462 | if (this_vma == gate_vma) | 1462 | if (this_vma == gate_vma) |
1463 | return NULL; | 1463 | return NULL; |
1464 | return gate_vma; | 1464 | return gate_vma; |
1465 | } | 1465 | } |
1466 | 1466 | ||
1467 | /* | 1467 | /* |
1468 | * Actual dumper | 1468 | * Actual dumper |
1469 | * | 1469 | * |
1470 | * This is a two-pass process; first we find the offsets of the bits, | 1470 | * This is a two-pass process; first we find the offsets of the bits, |
1471 | * and then they are actually written out. If we run out of core limit | 1471 | * and then they are actually written out. If we run out of core limit |
1472 | * we just truncate. | 1472 | * we just truncate. |
1473 | */ | 1473 | */ |
1474 | static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file) | 1474 | static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file) |
1475 | { | 1475 | { |
1476 | #define NUM_NOTES 6 | 1476 | #define NUM_NOTES 6 |
1477 | int has_dumped = 0; | 1477 | int has_dumped = 0; |
1478 | mm_segment_t fs; | 1478 | mm_segment_t fs; |
1479 | int segs; | 1479 | int segs; |
1480 | size_t size = 0; | 1480 | size_t size = 0; |
1481 | int i; | 1481 | int i; |
1482 | struct vm_area_struct *vma, *gate_vma; | 1482 | struct vm_area_struct *vma, *gate_vma; |
1483 | struct elfhdr *elf = NULL; | 1483 | struct elfhdr *elf = NULL; |
1484 | loff_t offset = 0, dataoff, foffset; | 1484 | loff_t offset = 0, dataoff, foffset; |
1485 | unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur; | 1485 | unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur; |
1486 | int numnote; | 1486 | int numnote; |
1487 | struct memelfnote *notes = NULL; | 1487 | struct memelfnote *notes = NULL; |
1488 | struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */ | 1488 | struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */ |
1489 | struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */ | 1489 | struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */ |
1490 | struct task_struct *g, *p; | 1490 | struct task_struct *g, *p; |
1491 | LIST_HEAD(thread_list); | 1491 | LIST_HEAD(thread_list); |
1492 | struct list_head *t; | 1492 | struct list_head *t; |
1493 | elf_fpregset_t *fpu = NULL; | 1493 | elf_fpregset_t *fpu = NULL; |
1494 | #ifdef ELF_CORE_COPY_XFPREGS | 1494 | #ifdef ELF_CORE_COPY_XFPREGS |
1495 | elf_fpxregset_t *xfpu = NULL; | 1495 | elf_fpxregset_t *xfpu = NULL; |
1496 | #endif | 1496 | #endif |
1497 | int thread_status_size = 0; | 1497 | int thread_status_size = 0; |
1498 | elf_addr_t *auxv; | 1498 | elf_addr_t *auxv; |
1499 | 1499 | ||
1500 | /* | 1500 | /* |
1501 | * We no longer stop all VM operations. | 1501 | * We no longer stop all VM operations. |
1502 | * | 1502 | * |
1503 | * This is because those proceses that could possibly change map_count | 1503 | * This is because those proceses that could possibly change map_count |
1504 | * or the mmap / vma pages are now blocked in do_exit on current | 1504 | * or the mmap / vma pages are now blocked in do_exit on current |
1505 | * finishing this core dump. | 1505 | * finishing this core dump. |
1506 | * | 1506 | * |
1507 | * Only ptrace can touch these memory addresses, but it doesn't change | 1507 | * Only ptrace can touch these memory addresses, but it doesn't change |
1508 | * the map_count or the pages allocated. So no possibility of crashing | 1508 | * the map_count or the pages allocated. So no possibility of crashing |
1509 | * exists while dumping the mm->vm_next areas to the core file. | 1509 | * exists while dumping the mm->vm_next areas to the core file. |
1510 | */ | 1510 | */ |
1511 | 1511 | ||
1512 | /* alloc memory for large data structures: too large to be on stack */ | 1512 | /* alloc memory for large data structures: too large to be on stack */ |
1513 | elf = kmalloc(sizeof(*elf), GFP_KERNEL); | 1513 | elf = kmalloc(sizeof(*elf), GFP_KERNEL); |
1514 | if (!elf) | 1514 | if (!elf) |
1515 | goto cleanup; | 1515 | goto cleanup; |
1516 | prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL); | 1516 | prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL); |
1517 | if (!prstatus) | 1517 | if (!prstatus) |
1518 | goto cleanup; | 1518 | goto cleanup; |
1519 | psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL); | 1519 | psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL); |
1520 | if (!psinfo) | 1520 | if (!psinfo) |
1521 | goto cleanup; | 1521 | goto cleanup; |
1522 | notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL); | 1522 | notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL); |
1523 | if (!notes) | 1523 | if (!notes) |
1524 | goto cleanup; | 1524 | goto cleanup; |
1525 | fpu = kmalloc(sizeof(*fpu), GFP_KERNEL); | 1525 | fpu = kmalloc(sizeof(*fpu), GFP_KERNEL); |
1526 | if (!fpu) | 1526 | if (!fpu) |
1527 | goto cleanup; | 1527 | goto cleanup; |
1528 | #ifdef ELF_CORE_COPY_XFPREGS | 1528 | #ifdef ELF_CORE_COPY_XFPREGS |
1529 | xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL); | 1529 | xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL); |
1530 | if (!xfpu) | 1530 | if (!xfpu) |
1531 | goto cleanup; | 1531 | goto cleanup; |
1532 | #endif | 1532 | #endif |
1533 | 1533 | ||
1534 | if (signr) { | 1534 | if (signr) { |
1535 | struct elf_thread_status *tmp; | 1535 | struct elf_thread_status *tmp; |
1536 | rcu_read_lock(); | 1536 | rcu_read_lock(); |
1537 | do_each_thread(g,p) | 1537 | do_each_thread(g,p) |
1538 | if (current->mm == p->mm && current != p) { | 1538 | if (current->mm == p->mm && current != p) { |
1539 | tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC); | 1539 | tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC); |
1540 | if (!tmp) { | 1540 | if (!tmp) { |
1541 | rcu_read_unlock(); | 1541 | rcu_read_unlock(); |
1542 | goto cleanup; | 1542 | goto cleanup; |
1543 | } | 1543 | } |
1544 | tmp->thread = p; | 1544 | tmp->thread = p; |
1545 | list_add(&tmp->list, &thread_list); | 1545 | list_add(&tmp->list, &thread_list); |
1546 | } | 1546 | } |
1547 | while_each_thread(g,p); | 1547 | while_each_thread(g,p); |
1548 | rcu_read_unlock(); | 1548 | rcu_read_unlock(); |
1549 | list_for_each(t, &thread_list) { | 1549 | list_for_each(t, &thread_list) { |
1550 | struct elf_thread_status *tmp; | 1550 | struct elf_thread_status *tmp; |
1551 | int sz; | 1551 | int sz; |
1552 | 1552 | ||
1553 | tmp = list_entry(t, struct elf_thread_status, list); | 1553 | tmp = list_entry(t, struct elf_thread_status, list); |
1554 | sz = elf_dump_thread_status(signr, tmp); | 1554 | sz = elf_dump_thread_status(signr, tmp); |
1555 | thread_status_size += sz; | 1555 | thread_status_size += sz; |
1556 | } | 1556 | } |
1557 | } | 1557 | } |
1558 | /* now collect the dump for the current */ | 1558 | /* now collect the dump for the current */ |
1559 | memset(prstatus, 0, sizeof(*prstatus)); | 1559 | memset(prstatus, 0, sizeof(*prstatus)); |
1560 | fill_prstatus(prstatus, current, signr); | 1560 | fill_prstatus(prstatus, current, signr); |
1561 | elf_core_copy_regs(&prstatus->pr_reg, regs); | 1561 | elf_core_copy_regs(&prstatus->pr_reg, regs); |
1562 | 1562 | ||
1563 | segs = current->mm->map_count; | 1563 | segs = current->mm->map_count; |
1564 | #ifdef ELF_CORE_EXTRA_PHDRS | 1564 | #ifdef ELF_CORE_EXTRA_PHDRS |
1565 | segs += ELF_CORE_EXTRA_PHDRS; | 1565 | segs += ELF_CORE_EXTRA_PHDRS; |
1566 | #endif | 1566 | #endif |
1567 | 1567 | ||
1568 | gate_vma = get_gate_vma(current); | 1568 | gate_vma = get_gate_vma(current); |
1569 | if (gate_vma != NULL) | 1569 | if (gate_vma != NULL) |
1570 | segs++; | 1570 | segs++; |
1571 | 1571 | ||
1572 | /* Set up header */ | 1572 | /* Set up header */ |
1573 | fill_elf_header(elf, segs + 1); /* including notes section */ | 1573 | fill_elf_header(elf, segs + 1); /* including notes section */ |
1574 | 1574 | ||
1575 | has_dumped = 1; | 1575 | has_dumped = 1; |
1576 | current->flags |= PF_DUMPCORE; | 1576 | current->flags |= PF_DUMPCORE; |
1577 | 1577 | ||
1578 | /* | 1578 | /* |
1579 | * Set up the notes in similar form to SVR4 core dumps made | 1579 | * Set up the notes in similar form to SVR4 core dumps made |
1580 | * with info from their /proc. | 1580 | * with info from their /proc. |
1581 | */ | 1581 | */ |
1582 | 1582 | ||
1583 | fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus); | 1583 | fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus); |
1584 | fill_psinfo(psinfo, current->group_leader, current->mm); | 1584 | fill_psinfo(psinfo, current->group_leader, current->mm); |
1585 | fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo); | 1585 | fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo); |
1586 | 1586 | ||
1587 | numnote = 2; | 1587 | numnote = 2; |
1588 | 1588 | ||
1589 | auxv = (elf_addr_t *)current->mm->saved_auxv; | 1589 | auxv = (elf_addr_t *)current->mm->saved_auxv; |
1590 | 1590 | ||
1591 | i = 0; | 1591 | i = 0; |
1592 | do | 1592 | do |
1593 | i += 2; | 1593 | i += 2; |
1594 | while (auxv[i - 2] != AT_NULL); | 1594 | while (auxv[i - 2] != AT_NULL); |
1595 | fill_note(¬es[numnote++], "CORE", NT_AUXV, | 1595 | fill_note(¬es[numnote++], "CORE", NT_AUXV, |
1596 | i * sizeof(elf_addr_t), auxv); | 1596 | i * sizeof(elf_addr_t), auxv); |
1597 | 1597 | ||
1598 | /* Try to dump the FPU. */ | 1598 | /* Try to dump the FPU. */ |
1599 | if ((prstatus->pr_fpvalid = | 1599 | if ((prstatus->pr_fpvalid = |
1600 | elf_core_copy_task_fpregs(current, regs, fpu))) | 1600 | elf_core_copy_task_fpregs(current, regs, fpu))) |
1601 | fill_note(notes + numnote++, | 1601 | fill_note(notes + numnote++, |
1602 | "CORE", NT_PRFPREG, sizeof(*fpu), fpu); | 1602 | "CORE", NT_PRFPREG, sizeof(*fpu), fpu); |
1603 | #ifdef ELF_CORE_COPY_XFPREGS | 1603 | #ifdef ELF_CORE_COPY_XFPREGS |
1604 | if (elf_core_copy_task_xfpregs(current, xfpu)) | 1604 | if (elf_core_copy_task_xfpregs(current, xfpu)) |
1605 | fill_note(notes + numnote++, | 1605 | fill_note(notes + numnote++, |
1606 | "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu); | 1606 | "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu); |
1607 | #endif | 1607 | #endif |
1608 | 1608 | ||
1609 | fs = get_fs(); | 1609 | fs = get_fs(); |
1610 | set_fs(KERNEL_DS); | 1610 | set_fs(KERNEL_DS); |
1611 | 1611 | ||
1612 | DUMP_WRITE(elf, sizeof(*elf)); | 1612 | DUMP_WRITE(elf, sizeof(*elf)); |
1613 | offset += sizeof(*elf); /* Elf header */ | 1613 | offset += sizeof(*elf); /* Elf header */ |
1614 | offset += (segs + 1) * sizeof(struct elf_phdr); /* Program headers */ | 1614 | offset += (segs + 1) * sizeof(struct elf_phdr); /* Program headers */ |
1615 | foffset = offset; | 1615 | foffset = offset; |
1616 | 1616 | ||
1617 | /* Write notes phdr entry */ | 1617 | /* Write notes phdr entry */ |
1618 | { | 1618 | { |
1619 | struct elf_phdr phdr; | 1619 | struct elf_phdr phdr; |
1620 | int sz = 0; | 1620 | int sz = 0; |
1621 | 1621 | ||
1622 | for (i = 0; i < numnote; i++) | 1622 | for (i = 0; i < numnote; i++) |
1623 | sz += notesize(notes + i); | 1623 | sz += notesize(notes + i); |
1624 | 1624 | ||
1625 | sz += thread_status_size; | 1625 | sz += thread_status_size; |
1626 | 1626 | ||
1627 | #ifdef ELF_CORE_WRITE_EXTRA_NOTES | 1627 | #ifdef ELF_CORE_WRITE_EXTRA_NOTES |
1628 | sz += ELF_CORE_EXTRA_NOTES_SIZE; | 1628 | sz += ELF_CORE_EXTRA_NOTES_SIZE; |
1629 | #endif | 1629 | #endif |
1630 | 1630 | ||
1631 | fill_elf_note_phdr(&phdr, sz, offset); | 1631 | fill_elf_note_phdr(&phdr, sz, offset); |
1632 | offset += sz; | 1632 | offset += sz; |
1633 | DUMP_WRITE(&phdr, sizeof(phdr)); | 1633 | DUMP_WRITE(&phdr, sizeof(phdr)); |
1634 | } | 1634 | } |
1635 | 1635 | ||
1636 | dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE); | 1636 | dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE); |
1637 | 1637 | ||
1638 | /* Write program headers for segments dump */ | 1638 | /* Write program headers for segments dump */ |
1639 | for (vma = first_vma(current, gate_vma); vma != NULL; | 1639 | for (vma = first_vma(current, gate_vma); vma != NULL; |
1640 | vma = next_vma(vma, gate_vma)) { | 1640 | vma = next_vma(vma, gate_vma)) { |
1641 | struct elf_phdr phdr; | 1641 | struct elf_phdr phdr; |
1642 | size_t sz; | 1642 | size_t sz; |
1643 | 1643 | ||
1644 | sz = vma->vm_end - vma->vm_start; | 1644 | sz = vma->vm_end - vma->vm_start; |
1645 | 1645 | ||
1646 | phdr.p_type = PT_LOAD; | 1646 | phdr.p_type = PT_LOAD; |
1647 | phdr.p_offset = offset; | 1647 | phdr.p_offset = offset; |
1648 | phdr.p_vaddr = vma->vm_start; | 1648 | phdr.p_vaddr = vma->vm_start; |
1649 | phdr.p_paddr = 0; | 1649 | phdr.p_paddr = 0; |
1650 | phdr.p_filesz = maydump(vma) ? sz : 0; | 1650 | phdr.p_filesz = maydump(vma) ? sz : 0; |
1651 | phdr.p_memsz = sz; | 1651 | phdr.p_memsz = sz; |
1652 | offset += phdr.p_filesz; | 1652 | offset += phdr.p_filesz; |
1653 | phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0; | 1653 | phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0; |
1654 | if (vma->vm_flags & VM_WRITE) | 1654 | if (vma->vm_flags & VM_WRITE) |
1655 | phdr.p_flags |= PF_W; | 1655 | phdr.p_flags |= PF_W; |
1656 | if (vma->vm_flags & VM_EXEC) | 1656 | if (vma->vm_flags & VM_EXEC) |
1657 | phdr.p_flags |= PF_X; | 1657 | phdr.p_flags |= PF_X; |
1658 | phdr.p_align = ELF_EXEC_PAGESIZE; | 1658 | phdr.p_align = ELF_EXEC_PAGESIZE; |
1659 | 1659 | ||
1660 | DUMP_WRITE(&phdr, sizeof(phdr)); | 1660 | DUMP_WRITE(&phdr, sizeof(phdr)); |
1661 | } | 1661 | } |
1662 | 1662 | ||
1663 | #ifdef ELF_CORE_WRITE_EXTRA_PHDRS | 1663 | #ifdef ELF_CORE_WRITE_EXTRA_PHDRS |
1664 | ELF_CORE_WRITE_EXTRA_PHDRS; | 1664 | ELF_CORE_WRITE_EXTRA_PHDRS; |
1665 | #endif | 1665 | #endif |
1666 | 1666 | ||
1667 | /* write out the notes section */ | 1667 | /* write out the notes section */ |
1668 | for (i = 0; i < numnote; i++) | 1668 | for (i = 0; i < numnote; i++) |
1669 | if (!writenote(notes + i, file, &foffset)) | 1669 | if (!writenote(notes + i, file, &foffset)) |
1670 | goto end_coredump; | 1670 | goto end_coredump; |
1671 | 1671 | ||
1672 | #ifdef ELF_CORE_WRITE_EXTRA_NOTES | 1672 | #ifdef ELF_CORE_WRITE_EXTRA_NOTES |
1673 | ELF_CORE_WRITE_EXTRA_NOTES; | 1673 | ELF_CORE_WRITE_EXTRA_NOTES; |
1674 | #endif | 1674 | #endif |
1675 | 1675 | ||
1676 | /* write out the thread status notes section */ | 1676 | /* write out the thread status notes section */ |
1677 | list_for_each(t, &thread_list) { | 1677 | list_for_each(t, &thread_list) { |
1678 | struct elf_thread_status *tmp = | 1678 | struct elf_thread_status *tmp = |
1679 | list_entry(t, struct elf_thread_status, list); | 1679 | list_entry(t, struct elf_thread_status, list); |
1680 | 1680 | ||
1681 | for (i = 0; i < tmp->num_notes; i++) | 1681 | for (i = 0; i < tmp->num_notes; i++) |
1682 | if (!writenote(&tmp->notes[i], file, &foffset)) | 1682 | if (!writenote(&tmp->notes[i], file, &foffset)) |
1683 | goto end_coredump; | 1683 | goto end_coredump; |
1684 | } | 1684 | } |
1685 | 1685 | ||
1686 | /* Align to page */ | 1686 | /* Align to page */ |
1687 | DUMP_SEEK(dataoff - foffset); | 1687 | DUMP_SEEK(dataoff - foffset); |
1688 | 1688 | ||
1689 | for (vma = first_vma(current, gate_vma); vma != NULL; | 1689 | for (vma = first_vma(current, gate_vma); vma != NULL; |
1690 | vma = next_vma(vma, gate_vma)) { | 1690 | vma = next_vma(vma, gate_vma)) { |
1691 | unsigned long addr; | 1691 | unsigned long addr; |
1692 | 1692 | ||
1693 | if (!maydump(vma)) | 1693 | if (!maydump(vma)) |
1694 | continue; | 1694 | continue; |
1695 | 1695 | ||
1696 | for (addr = vma->vm_start; | 1696 | for (addr = vma->vm_start; |
1697 | addr < vma->vm_end; | 1697 | addr < vma->vm_end; |
1698 | addr += PAGE_SIZE) { | 1698 | addr += PAGE_SIZE) { |
1699 | struct page *page; | 1699 | struct page *page; |
1700 | struct vm_area_struct *vma; | 1700 | struct vm_area_struct *vma; |
1701 | 1701 | ||
1702 | if (get_user_pages(current, current->mm, addr, 1, 0, 1, | 1702 | if (get_user_pages(current, current->mm, addr, 1, 0, 1, |
1703 | &page, &vma) <= 0) { | 1703 | &page, &vma) <= 0) { |
1704 | DUMP_SEEK(PAGE_SIZE); | 1704 | DUMP_SEEK(PAGE_SIZE); |
1705 | } else { | 1705 | } else { |
1706 | if (page == ZERO_PAGE(addr)) { | 1706 | if (page == ZERO_PAGE(addr)) { |
1707 | DUMP_SEEK(PAGE_SIZE); | 1707 | if (!dump_seek(file, PAGE_SIZE)) { |
1708 | page_cache_release(page); | ||
1709 | goto end_coredump; | ||
1710 | } | ||
1708 | } else { | 1711 | } else { |
1709 | void *kaddr; | 1712 | void *kaddr; |
1710 | flush_cache_page(vma, addr, | 1713 | flush_cache_page(vma, addr, |
1711 | page_to_pfn(page)); | 1714 | page_to_pfn(page)); |
1712 | kaddr = kmap(page); | 1715 | kaddr = kmap(page); |
1713 | if ((size += PAGE_SIZE) > limit || | 1716 | if ((size += PAGE_SIZE) > limit || |
1714 | !dump_write(file, kaddr, | 1717 | !dump_write(file, kaddr, |
1715 | PAGE_SIZE)) { | 1718 | PAGE_SIZE)) { |
1716 | kunmap(page); | 1719 | kunmap(page); |
1717 | page_cache_release(page); | 1720 | page_cache_release(page); |
1718 | goto end_coredump; | 1721 | goto end_coredump; |
1719 | } | 1722 | } |
1720 | kunmap(page); | 1723 | kunmap(page); |
1721 | } | 1724 | } |
1722 | page_cache_release(page); | 1725 | page_cache_release(page); |
1723 | } | 1726 | } |
1724 | } | 1727 | } |
1725 | } | 1728 | } |
1726 | 1729 | ||
1727 | #ifdef ELF_CORE_WRITE_EXTRA_DATA | 1730 | #ifdef ELF_CORE_WRITE_EXTRA_DATA |
1728 | ELF_CORE_WRITE_EXTRA_DATA; | 1731 | ELF_CORE_WRITE_EXTRA_DATA; |
1729 | #endif | 1732 | #endif |
1730 | 1733 | ||
1731 | end_coredump: | 1734 | end_coredump: |
1732 | set_fs(fs); | 1735 | set_fs(fs); |
1733 | 1736 | ||
1734 | cleanup: | 1737 | cleanup: |
1735 | while (!list_empty(&thread_list)) { | 1738 | while (!list_empty(&thread_list)) { |
1736 | struct list_head *tmp = thread_list.next; | 1739 | struct list_head *tmp = thread_list.next; |
1737 | list_del(tmp); | 1740 | list_del(tmp); |
1738 | kfree(list_entry(tmp, struct elf_thread_status, list)); | 1741 | kfree(list_entry(tmp, struct elf_thread_status, list)); |
1739 | } | 1742 | } |
1740 | 1743 | ||
1741 | kfree(elf); | 1744 | kfree(elf); |
1742 | kfree(prstatus); | 1745 | kfree(prstatus); |
1743 | kfree(psinfo); | 1746 | kfree(psinfo); |
1744 | kfree(notes); | 1747 | kfree(notes); |
1745 | kfree(fpu); | 1748 | kfree(fpu); |
1746 | #ifdef ELF_CORE_COPY_XFPREGS | 1749 | #ifdef ELF_CORE_COPY_XFPREGS |
1747 | kfree(xfpu); | 1750 | kfree(xfpu); |
1748 | #endif | 1751 | #endif |
1749 | return has_dumped; | 1752 | return has_dumped; |
1750 | #undef NUM_NOTES | 1753 | #undef NUM_NOTES |
1751 | } | 1754 | } |
1752 | 1755 | ||
1753 | #endif /* USE_ELF_CORE_DUMP */ | 1756 | #endif /* USE_ELF_CORE_DUMP */ |
1754 | 1757 | ||
1755 | static int __init init_elf_binfmt(void) | 1758 | static int __init init_elf_binfmt(void) |
1756 | { | 1759 | { |
1757 | return register_binfmt(&elf_format); | 1760 | return register_binfmt(&elf_format); |
1758 | } | 1761 | } |
1759 | 1762 | ||
1760 | static void __exit exit_elf_binfmt(void) | 1763 | static void __exit exit_elf_binfmt(void) |
1761 | { | 1764 | { |
1762 | /* Remove the COFF and ELF loaders. */ | 1765 | /* Remove the COFF and ELF loaders. */ |
1763 | unregister_binfmt(&elf_format); | 1766 | unregister_binfmt(&elf_format); |
1764 | } | 1767 | } |
1765 | 1768 | ||
1766 | core_initcall(init_elf_binfmt); | 1769 | core_initcall(init_elf_binfmt); |
1767 | module_exit(exit_elf_binfmt); | 1770 | module_exit(exit_elf_binfmt); |
1768 | MODULE_LICENSE("GPL"); | 1771 | MODULE_LICENSE("GPL"); |
1769 | 1772 |
fs/binfmt_elf_fdpic.c
1 | /* binfmt_elf_fdpic.c: FDPIC ELF binary format | 1 | /* binfmt_elf_fdpic.c: FDPIC ELF binary format |
2 | * | 2 | * |
3 | * Copyright (C) 2003, 2004, 2006 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2003, 2004, 2006 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
5 | * Derived from binfmt_elf.c | 5 | * Derived from binfmt_elf.c |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
9 | * as published by the Free Software Foundation; either version | 9 | * as published by the Free Software Foundation; either version |
10 | * 2 of the License, or (at your option) any later version. | 10 | * 2 of the License, or (at your option) any later version. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | 14 | ||
15 | #include <linux/fs.h> | 15 | #include <linux/fs.h> |
16 | #include <linux/stat.h> | 16 | #include <linux/stat.h> |
17 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
18 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
19 | #include <linux/mman.h> | 19 | #include <linux/mman.h> |
20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
21 | #include <linux/signal.h> | 21 | #include <linux/signal.h> |
22 | #include <linux/binfmts.h> | 22 | #include <linux/binfmts.h> |
23 | #include <linux/string.h> | 23 | #include <linux/string.h> |
24 | #include <linux/file.h> | 24 | #include <linux/file.h> |
25 | #include <linux/fcntl.h> | 25 | #include <linux/fcntl.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/pagemap.h> | 27 | #include <linux/pagemap.h> |
28 | #include <linux/highmem.h> | 28 | #include <linux/highmem.h> |
29 | #include <linux/highuid.h> | 29 | #include <linux/highuid.h> |
30 | #include <linux/personality.h> | 30 | #include <linux/personality.h> |
31 | #include <linux/ptrace.h> | 31 | #include <linux/ptrace.h> |
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/smp_lock.h> | 33 | #include <linux/smp_lock.h> |
34 | #include <linux/elf.h> | 34 | #include <linux/elf.h> |
35 | #include <linux/elf-fdpic.h> | 35 | #include <linux/elf-fdpic.h> |
36 | #include <linux/elfcore.h> | 36 | #include <linux/elfcore.h> |
37 | 37 | ||
38 | #include <asm/uaccess.h> | 38 | #include <asm/uaccess.h> |
39 | #include <asm/param.h> | 39 | #include <asm/param.h> |
40 | #include <asm/pgalloc.h> | 40 | #include <asm/pgalloc.h> |
41 | 41 | ||
42 | typedef char *elf_caddr_t; | 42 | typedef char *elf_caddr_t; |
43 | 43 | ||
44 | #if 0 | 44 | #if 0 |
45 | #define kdebug(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ ) | 45 | #define kdebug(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ ) |
46 | #else | 46 | #else |
47 | #define kdebug(fmt, ...) do {} while(0) | 47 | #define kdebug(fmt, ...) do {} while(0) |
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | #if 0 | 50 | #if 0 |
51 | #define kdcore(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ ) | 51 | #define kdcore(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ ) |
52 | #else | 52 | #else |
53 | #define kdcore(fmt, ...) do {} while(0) | 53 | #define kdcore(fmt, ...) do {} while(0) |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | MODULE_LICENSE("GPL"); | 56 | MODULE_LICENSE("GPL"); |
57 | 57 | ||
58 | static int load_elf_fdpic_binary(struct linux_binprm *, struct pt_regs *); | 58 | static int load_elf_fdpic_binary(struct linux_binprm *, struct pt_regs *); |
59 | static int elf_fdpic_fetch_phdrs(struct elf_fdpic_params *, struct file *); | 59 | static int elf_fdpic_fetch_phdrs(struct elf_fdpic_params *, struct file *); |
60 | static int elf_fdpic_map_file(struct elf_fdpic_params *, struct file *, | 60 | static int elf_fdpic_map_file(struct elf_fdpic_params *, struct file *, |
61 | struct mm_struct *, const char *); | 61 | struct mm_struct *, const char *); |
62 | 62 | ||
63 | static int create_elf_fdpic_tables(struct linux_binprm *, struct mm_struct *, | 63 | static int create_elf_fdpic_tables(struct linux_binprm *, struct mm_struct *, |
64 | struct elf_fdpic_params *, | 64 | struct elf_fdpic_params *, |
65 | struct elf_fdpic_params *); | 65 | struct elf_fdpic_params *); |
66 | 66 | ||
67 | #ifndef CONFIG_MMU | 67 | #ifndef CONFIG_MMU |
68 | static int elf_fdpic_transfer_args_to_stack(struct linux_binprm *, | 68 | static int elf_fdpic_transfer_args_to_stack(struct linux_binprm *, |
69 | unsigned long *); | 69 | unsigned long *); |
70 | static int elf_fdpic_map_file_constdisp_on_uclinux(struct elf_fdpic_params *, | 70 | static int elf_fdpic_map_file_constdisp_on_uclinux(struct elf_fdpic_params *, |
71 | struct file *, | 71 | struct file *, |
72 | struct mm_struct *); | 72 | struct mm_struct *); |
73 | #endif | 73 | #endif |
74 | 74 | ||
75 | static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *, | 75 | static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *, |
76 | struct file *, struct mm_struct *); | 76 | struct file *, struct mm_struct *); |
77 | 77 | ||
78 | #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) | 78 | #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) |
79 | static int elf_fdpic_core_dump(long, struct pt_regs *, struct file *); | 79 | static int elf_fdpic_core_dump(long, struct pt_regs *, struct file *); |
80 | #endif | 80 | #endif |
81 | 81 | ||
82 | static struct linux_binfmt elf_fdpic_format = { | 82 | static struct linux_binfmt elf_fdpic_format = { |
83 | .module = THIS_MODULE, | 83 | .module = THIS_MODULE, |
84 | .load_binary = load_elf_fdpic_binary, | 84 | .load_binary = load_elf_fdpic_binary, |
85 | #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) | 85 | #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) |
86 | .core_dump = elf_fdpic_core_dump, | 86 | .core_dump = elf_fdpic_core_dump, |
87 | #endif | 87 | #endif |
88 | .min_coredump = ELF_EXEC_PAGESIZE, | 88 | .min_coredump = ELF_EXEC_PAGESIZE, |
89 | }; | 89 | }; |
90 | 90 | ||
91 | static int __init init_elf_fdpic_binfmt(void) | 91 | static int __init init_elf_fdpic_binfmt(void) |
92 | { | 92 | { |
93 | return register_binfmt(&elf_fdpic_format); | 93 | return register_binfmt(&elf_fdpic_format); |
94 | } | 94 | } |
95 | 95 | ||
96 | static void __exit exit_elf_fdpic_binfmt(void) | 96 | static void __exit exit_elf_fdpic_binfmt(void) |
97 | { | 97 | { |
98 | unregister_binfmt(&elf_fdpic_format); | 98 | unregister_binfmt(&elf_fdpic_format); |
99 | } | 99 | } |
100 | 100 | ||
101 | core_initcall(init_elf_fdpic_binfmt); | 101 | core_initcall(init_elf_fdpic_binfmt); |
102 | module_exit(exit_elf_fdpic_binfmt); | 102 | module_exit(exit_elf_fdpic_binfmt); |
103 | 103 | ||
104 | static int is_elf_fdpic(struct elfhdr *hdr, struct file *file) | 104 | static int is_elf_fdpic(struct elfhdr *hdr, struct file *file) |
105 | { | 105 | { |
106 | if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0) | 106 | if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0) |
107 | return 0; | 107 | return 0; |
108 | if (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN) | 108 | if (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN) |
109 | return 0; | 109 | return 0; |
110 | if (!elf_check_arch(hdr) || !elf_check_fdpic(hdr)) | 110 | if (!elf_check_arch(hdr) || !elf_check_fdpic(hdr)) |
111 | return 0; | 111 | return 0; |
112 | if (!file->f_op || !file->f_op->mmap) | 112 | if (!file->f_op || !file->f_op->mmap) |
113 | return 0; | 113 | return 0; |
114 | return 1; | 114 | return 1; |
115 | } | 115 | } |
116 | 116 | ||
117 | /*****************************************************************************/ | 117 | /*****************************************************************************/ |
118 | /* | 118 | /* |
119 | * read the program headers table into memory | 119 | * read the program headers table into memory |
120 | */ | 120 | */ |
121 | static int elf_fdpic_fetch_phdrs(struct elf_fdpic_params *params, | 121 | static int elf_fdpic_fetch_phdrs(struct elf_fdpic_params *params, |
122 | struct file *file) | 122 | struct file *file) |
123 | { | 123 | { |
124 | struct elf32_phdr *phdr; | 124 | struct elf32_phdr *phdr; |
125 | unsigned long size; | 125 | unsigned long size; |
126 | int retval, loop; | 126 | int retval, loop; |
127 | 127 | ||
128 | if (params->hdr.e_phentsize != sizeof(struct elf_phdr)) | 128 | if (params->hdr.e_phentsize != sizeof(struct elf_phdr)) |
129 | return -ENOMEM; | 129 | return -ENOMEM; |
130 | if (params->hdr.e_phnum > 65536U / sizeof(struct elf_phdr)) | 130 | if (params->hdr.e_phnum > 65536U / sizeof(struct elf_phdr)) |
131 | return -ENOMEM; | 131 | return -ENOMEM; |
132 | 132 | ||
133 | size = params->hdr.e_phnum * sizeof(struct elf_phdr); | 133 | size = params->hdr.e_phnum * sizeof(struct elf_phdr); |
134 | params->phdrs = kmalloc(size, GFP_KERNEL); | 134 | params->phdrs = kmalloc(size, GFP_KERNEL); |
135 | if (!params->phdrs) | 135 | if (!params->phdrs) |
136 | return -ENOMEM; | 136 | return -ENOMEM; |
137 | 137 | ||
138 | retval = kernel_read(file, params->hdr.e_phoff, | 138 | retval = kernel_read(file, params->hdr.e_phoff, |
139 | (char *) params->phdrs, size); | 139 | (char *) params->phdrs, size); |
140 | if (retval < 0) | 140 | if (retval < 0) |
141 | return retval; | 141 | return retval; |
142 | 142 | ||
143 | /* determine stack size for this binary */ | 143 | /* determine stack size for this binary */ |
144 | phdr = params->phdrs; | 144 | phdr = params->phdrs; |
145 | for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) { | 145 | for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) { |
146 | if (phdr->p_type != PT_GNU_STACK) | 146 | if (phdr->p_type != PT_GNU_STACK) |
147 | continue; | 147 | continue; |
148 | 148 | ||
149 | if (phdr->p_flags & PF_X) | 149 | if (phdr->p_flags & PF_X) |
150 | params->flags |= ELF_FDPIC_FLAG_EXEC_STACK; | 150 | params->flags |= ELF_FDPIC_FLAG_EXEC_STACK; |
151 | else | 151 | else |
152 | params->flags |= ELF_FDPIC_FLAG_NOEXEC_STACK; | 152 | params->flags |= ELF_FDPIC_FLAG_NOEXEC_STACK; |
153 | 153 | ||
154 | params->stack_size = phdr->p_memsz; | 154 | params->stack_size = phdr->p_memsz; |
155 | break; | 155 | break; |
156 | } | 156 | } |
157 | 157 | ||
158 | return 0; | 158 | return 0; |
159 | } | 159 | } |
160 | 160 | ||
161 | /*****************************************************************************/ | 161 | /*****************************************************************************/ |
162 | /* | 162 | /* |
163 | * load an fdpic binary into various bits of memory | 163 | * load an fdpic binary into various bits of memory |
164 | */ | 164 | */ |
static int load_elf_fdpic_binary(struct linux_binprm *bprm,
				 struct pt_regs *regs)
{
	struct elf_fdpic_params exec_params, interp_params;
	struct elf_phdr *phdr;
	unsigned long stack_size, entryaddr;
#ifndef CONFIG_MMU
	unsigned long fullsize;
#endif
#ifdef ELF_FDPIC_PLAT_INIT
	unsigned long dynaddr;
#endif
	struct file *interpreter = NULL; /* to shut gcc up */
	char *interpreter_name = NULL;
	int executable_stack;
	int retval, i;

	kdebug("____ LOAD %d ____", current->pid);

	memset(&exec_params, 0, sizeof(exec_params));
	memset(&interp_params, 0, sizeof(interp_params));

	/* the first chunk of the image was read into bprm->buf for us */
	exec_params.hdr = *(struct elfhdr *) bprm->buf;
	exec_params.flags = ELF_FDPIC_FLAG_PRESENT | ELF_FDPIC_FLAG_EXECUTABLE;

	/* check that this is a binary we know how to deal with */
	retval = -ENOEXEC;
	if (!is_elf_fdpic(&exec_params.hdr, bprm->file))
		goto error;

	/* read the program header table */
	retval = elf_fdpic_fetch_phdrs(&exec_params, bprm->file);
	if (retval < 0)
		goto error;

	/* scan for a program header that specifies an interpreter */
	phdr = exec_params.phdrs;

	for (i = 0; i < exec_params.hdr.e_phnum; i++, phdr++) {
		switch (phdr->p_type) {
		case PT_INTERP:
			/* sanity-limit the interpreter path length */
			retval = -ENOMEM;
			if (phdr->p_filesz > PATH_MAX)
				goto error;
			retval = -ENOENT;
			if (phdr->p_filesz < 2)
				goto error;

			/* read the name of the interpreter into memory */
			interpreter_name = kmalloc(phdr->p_filesz, GFP_KERNEL);
			if (!interpreter_name)
				goto error;

			/* NOTE(review): a short read here is not detected
			 * (only retval < 0 is); the NUL check below would
			 * then test uninitialised kmalloc memory — worth
			 * confirming against the short-read semantics of
			 * kernel_read */
			retval = kernel_read(bprm->file,
					     phdr->p_offset,
					     interpreter_name,
					     phdr->p_filesz);
			if (retval < 0)
				goto error;

			/* the path must be NUL-terminated within p_filesz */
			retval = -ENOENT;
			if (interpreter_name[phdr->p_filesz - 1] != '\0')
				goto error;

			kdebug("Using ELF interpreter %s", interpreter_name);

			/* replace the program with the interpreter */
			interpreter = open_exec(interpreter_name);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter)) {
				interpreter = NULL;
				goto error;
			}

			/*
			 * If the binary is not readable then enforce
			 * mm->dumpable = 0 regardless of the interpreter's
			 * permissions.
			 */
			if (file_permission(interpreter, MAY_READ) < 0)
				bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

			/* pull in the interpreter's own ELF header, reusing
			 * bprm->buf (the exec header was already copied) */
			retval = kernel_read(interpreter, 0, bprm->buf,
					     BINPRM_BUF_SIZE);
			if (retval < 0)
				goto error;

			interp_params.hdr = *((struct elfhdr *) bprm->buf);
			break;

		case PT_LOAD:
#ifdef CONFIG_MMU
			/* remember the lowest requested load address */
			if (exec_params.load_addr == 0)
				exec_params.load_addr = phdr->p_vaddr;
#endif
			break;
		}

	}

	if (elf_check_const_displacement(&exec_params.hdr))
		exec_params.flags |= ELF_FDPIC_FLAG_CONSTDISP;

	/* perform insanity checks on the interpreter */
	if (interpreter_name) {
		retval = -ELIBBAD;
		if (!is_elf_fdpic(&interp_params.hdr, interpreter))
			goto error;

		interp_params.flags = ELF_FDPIC_FLAG_PRESENT;

		/* read the interpreter's program header table */
		retval = elf_fdpic_fetch_phdrs(&interp_params, interpreter);
		if (retval < 0)
			goto error;
	}

	/* take the larger of the two stack-size requests */
	stack_size = exec_params.stack_size;
	if (stack_size < interp_params.stack_size)
		stack_size = interp_params.stack_size;

	/* the executable's PT_GNU_STACK wish wins over the interpreter's */
	if (exec_params.flags & ELF_FDPIC_FLAG_EXEC_STACK)
		executable_stack = EXSTACK_ENABLE_X;
	else if (exec_params.flags & ELF_FDPIC_FLAG_NOEXEC_STACK)
		executable_stack = EXSTACK_DISABLE_X;
	else if (interp_params.flags & ELF_FDPIC_FLAG_EXEC_STACK)
		executable_stack = EXSTACK_ENABLE_X;
	else if (interp_params.flags & ELF_FDPIC_FLAG_NOEXEC_STACK)
		executable_stack = EXSTACK_DISABLE_X;
	else
		executable_stack = EXSTACK_DEFAULT;

	retval = -ENOEXEC;
	if (stack_size == 0)
		goto error;

	if (elf_check_const_displacement(&interp_params.hdr))
		interp_params.flags |= ELF_FDPIC_FLAG_CONSTDISP;

	/* flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto error;

	/* there's now no turning back... the old userspace image is dead,
	 * defunct, deceased, etc. after this point we have to exit via
	 * error_kill */
	set_personality(PER_LINUX_FDPIC);
	set_binfmt(&elf_fdpic_format);

	current->mm->start_code = 0;
	current->mm->end_code = 0;
	current->mm->start_stack = 0;
	current->mm->start_data = 0;
	current->mm->end_data = 0;
	current->mm->context.exec_fdpic_loadmap = 0;
	current->mm->context.interp_fdpic_loadmap = 0;

	current->flags &= ~PF_FORKNOEXEC;

#ifdef CONFIG_MMU
	/* let the arch decide where the two images and the stack/brk go */
	elf_fdpic_arch_lay_out_mm(&exec_params,
				  &interp_params,
				  &current->mm->start_stack,
				  &current->mm->start_brk);

	retval = setup_arg_pages(bprm, current->mm->start_stack,
				 executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto error_kill;
	}
#endif

	/* load the executable and interpreter into memory */
	retval = elf_fdpic_map_file(&exec_params, bprm->file, current->mm,
				    "executable");
	if (retval < 0)
		goto error_kill;

	if (interpreter_name) {
		retval = elf_fdpic_map_file(&interp_params, interpreter,
					    current->mm, "interpreter");
		if (retval < 0) {
			printk(KERN_ERR "Unable to load interpreter\n");
			goto error_kill;
		}

		/* done with the interpreter file; drop it now so the common
		 * error path doesn't release it a second time */
		allow_write_access(interpreter);
		fput(interpreter);
		interpreter = NULL;
	}

#ifdef CONFIG_MMU
	if (!current->mm->start_brk)
		current->mm->start_brk = current->mm->end_data;

	current->mm->brk = current->mm->start_brk =
		PAGE_ALIGN(current->mm->start_brk);

#else
	/* create a stack and brk area big enough for everyone
	 * - the brk heap starts at the bottom and works up
	 * - the stack starts at the top and works down
	 */
	stack_size = (stack_size + PAGE_SIZE - 1) & PAGE_MASK;
	if (stack_size < PAGE_SIZE * 2)
		stack_size = PAGE_SIZE * 2;

	down_write(&current->mm->mmap_sem);
	current->mm->start_brk = do_mmap(NULL, 0, stack_size,
					 PROT_READ | PROT_WRITE | PROT_EXEC,
					 MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN,
					 0);

	if (IS_ERR_VALUE(current->mm->start_brk)) {
		up_write(&current->mm->mmap_sem);
		retval = current->mm->start_brk;
		current->mm->start_brk = 0;
		goto error_kill;
	}

	/* expand the stack mapping to use up the entire allocation granule */
	fullsize = ksize((char *) current->mm->start_brk);
	if (!IS_ERR_VALUE(do_mremap(current->mm->start_brk, stack_size,
				    fullsize, 0, 0)))
		stack_size = fullsize;
	up_write(&current->mm->mmap_sem);

	/* the last page of the allocation is reserved for the stack; the
	 * rest is handed to brk */
	current->mm->brk = current->mm->start_brk;
	current->mm->context.end_brk = current->mm->start_brk;
	current->mm->context.end_brk +=
		(stack_size > PAGE_SIZE) ? (stack_size - PAGE_SIZE) : 0;
	current->mm->start_stack = current->mm->start_brk + stack_size;
#endif

	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	if (create_elf_fdpic_tables(bprm, current->mm,
				    &exec_params, &interp_params) < 0)
		goto error_kill;

	kdebug("- start_code %lx", current->mm->start_code);
	kdebug("- end_code %lx", current->mm->end_code);
	kdebug("- start_data %lx", current->mm->start_data);
	kdebug("- end_data %lx", current->mm->end_data);
	kdebug("- start_brk %lx", current->mm->start_brk);
	kdebug("- brk %lx", current->mm->brk);
	kdebug("- start_stack %lx", current->mm->start_stack);

#ifdef ELF_FDPIC_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example. This macro performs whatever initialization to
	 * the regs structure is required.
	 */
	dynaddr = interp_params.dynamic_addr ?: exec_params.dynamic_addr;
	ELF_FDPIC_PLAT_INIT(regs, exec_params.map_addr, interp_params.map_addr,
			    dynaddr);
#endif

	/* everything is now ready... get the userspace context ready to roll */
	entryaddr = interp_params.entry_addr ?: exec_params.entry_addr;
	start_thread(regs, entryaddr, current->mm->start_stack);

	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}

	retval = 0;

	/* common exit: release the interpreter file (if still held) and all
	 * table allocations; also reached with retval == 0 on success */
error:
	if (interpreter) {
		allow_write_access(interpreter);
		fput(interpreter);
	}
	kfree(interpreter_name);
	kfree(exec_params.phdrs);
	kfree(exec_params.loadmap);
	kfree(interp_params.phdrs);
	kfree(interp_params.loadmap);
	return retval;

	/* unrecoverable error - kill the process */
error_kill:
	send_sig(SIGSEGV, current, 0);
	goto error;

}
458 | 458 | ||
459 | /*****************************************************************************/ | 459 | /*****************************************************************************/ |
460 | /* | 460 | /* |
461 | * present useful information to the program | 461 | * present useful information to the program |
462 | */ | 462 | */ |
463 | static int create_elf_fdpic_tables(struct linux_binprm *bprm, | 463 | static int create_elf_fdpic_tables(struct linux_binprm *bprm, |
464 | struct mm_struct *mm, | 464 | struct mm_struct *mm, |
465 | struct elf_fdpic_params *exec_params, | 465 | struct elf_fdpic_params *exec_params, |
466 | struct elf_fdpic_params *interp_params) | 466 | struct elf_fdpic_params *interp_params) |
467 | { | 467 | { |
468 | unsigned long sp, csp, nitems; | 468 | unsigned long sp, csp, nitems; |
469 | elf_caddr_t __user *argv, *envp; | 469 | elf_caddr_t __user *argv, *envp; |
470 | size_t platform_len = 0, len; | 470 | size_t platform_len = 0, len; |
471 | char *k_platform; | 471 | char *k_platform; |
472 | char __user *u_platform, *p; | 472 | char __user *u_platform, *p; |
473 | long hwcap; | 473 | long hwcap; |
474 | int loop; | 474 | int loop; |
475 | 475 | ||
476 | /* we're going to shovel a whole load of stuff onto the stack */ | 476 | /* we're going to shovel a whole load of stuff onto the stack */ |
477 | #ifdef CONFIG_MMU | 477 | #ifdef CONFIG_MMU |
478 | sp = bprm->p; | 478 | sp = bprm->p; |
479 | #else | 479 | #else |
480 | sp = mm->start_stack; | 480 | sp = mm->start_stack; |
481 | 481 | ||
482 | /* stack the program arguments and environment */ | 482 | /* stack the program arguments and environment */ |
483 | if (elf_fdpic_transfer_args_to_stack(bprm, &sp) < 0) | 483 | if (elf_fdpic_transfer_args_to_stack(bprm, &sp) < 0) |
484 | return -EFAULT; | 484 | return -EFAULT; |
485 | #endif | 485 | #endif |
486 | 486 | ||
487 | /* get hold of platform and hardware capabilities masks for the machine | 487 | /* get hold of platform and hardware capabilities masks for the machine |
488 | * we are running on. In some cases (Sparc), this info is impossible | 488 | * we are running on. In some cases (Sparc), this info is impossible |
489 | * to get, in others (i386) it is merely difficult. | 489 | * to get, in others (i386) it is merely difficult. |
490 | */ | 490 | */ |
491 | hwcap = ELF_HWCAP; | 491 | hwcap = ELF_HWCAP; |
492 | k_platform = ELF_PLATFORM; | 492 | k_platform = ELF_PLATFORM; |
493 | u_platform = NULL; | 493 | u_platform = NULL; |
494 | 494 | ||
495 | if (k_platform) { | 495 | if (k_platform) { |
496 | platform_len = strlen(k_platform) + 1; | 496 | platform_len = strlen(k_platform) + 1; |
497 | sp -= platform_len; | 497 | sp -= platform_len; |
498 | u_platform = (char __user *) sp; | 498 | u_platform = (char __user *) sp; |
499 | if (__copy_to_user(u_platform, k_platform, platform_len) != 0) | 499 | if (__copy_to_user(u_platform, k_platform, platform_len) != 0) |
500 | return -EFAULT; | 500 | return -EFAULT; |
501 | } | 501 | } |
502 | 502 | ||
503 | #if defined(__i386__) && defined(CONFIG_SMP) | 503 | #if defined(__i386__) && defined(CONFIG_SMP) |
504 | /* in some cases (e.g. Hyper-Threading), we want to avoid L1 evictions | 504 | /* in some cases (e.g. Hyper-Threading), we want to avoid L1 evictions |
505 | * by the processes running on the same package. One thing we can do is | 505 | * by the processes running on the same package. One thing we can do is |
506 | * to shuffle the initial stack for them. | 506 | * to shuffle the initial stack for them. |
507 | * | 507 | * |
508 | * the conditionals here are unneeded, but kept in to make the code | 508 | * the conditionals here are unneeded, but kept in to make the code |
509 | * behaviour the same as pre change unless we have hyperthreaded | 509 | * behaviour the same as pre change unless we have hyperthreaded |
510 | * processors. This keeps Mr Marcelo Person happier but should be | 510 | * processors. This keeps Mr Marcelo Person happier but should be |
511 | * removed for 2.5 | 511 | * removed for 2.5 |
512 | */ | 512 | */ |
513 | if (smp_num_siblings > 1) | 513 | if (smp_num_siblings > 1) |
514 | sp = sp - ((current->pid % 64) << 7); | 514 | sp = sp - ((current->pid % 64) << 7); |
515 | #endif | 515 | #endif |
516 | 516 | ||
517 | sp &= ~7UL; | 517 | sp &= ~7UL; |
518 | 518 | ||
519 | /* stack the load map(s) */ | 519 | /* stack the load map(s) */ |
520 | len = sizeof(struct elf32_fdpic_loadmap); | 520 | len = sizeof(struct elf32_fdpic_loadmap); |
521 | len += sizeof(struct elf32_fdpic_loadseg) * exec_params->loadmap->nsegs; | 521 | len += sizeof(struct elf32_fdpic_loadseg) * exec_params->loadmap->nsegs; |
522 | sp = (sp - len) & ~7UL; | 522 | sp = (sp - len) & ~7UL; |
523 | exec_params->map_addr = sp; | 523 | exec_params->map_addr = sp; |
524 | 524 | ||
525 | if (copy_to_user((void __user *) sp, exec_params->loadmap, len) != 0) | 525 | if (copy_to_user((void __user *) sp, exec_params->loadmap, len) != 0) |
526 | return -EFAULT; | 526 | return -EFAULT; |
527 | 527 | ||
528 | current->mm->context.exec_fdpic_loadmap = (unsigned long) sp; | 528 | current->mm->context.exec_fdpic_loadmap = (unsigned long) sp; |
529 | 529 | ||
530 | if (interp_params->loadmap) { | 530 | if (interp_params->loadmap) { |
531 | len = sizeof(struct elf32_fdpic_loadmap); | 531 | len = sizeof(struct elf32_fdpic_loadmap); |
532 | len += sizeof(struct elf32_fdpic_loadseg) * | 532 | len += sizeof(struct elf32_fdpic_loadseg) * |
533 | interp_params->loadmap->nsegs; | 533 | interp_params->loadmap->nsegs; |
534 | sp = (sp - len) & ~7UL; | 534 | sp = (sp - len) & ~7UL; |
535 | interp_params->map_addr = sp; | 535 | interp_params->map_addr = sp; |
536 | 536 | ||
537 | if (copy_to_user((void __user *) sp, interp_params->loadmap, | 537 | if (copy_to_user((void __user *) sp, interp_params->loadmap, |
538 | len) != 0) | 538 | len) != 0) |
539 | return -EFAULT; | 539 | return -EFAULT; |
540 | 540 | ||
541 | current->mm->context.interp_fdpic_loadmap = (unsigned long) sp; | 541 | current->mm->context.interp_fdpic_loadmap = (unsigned long) sp; |
542 | } | 542 | } |
543 | 543 | ||
544 | /* force 16 byte _final_ alignment here for generality */ | 544 | /* force 16 byte _final_ alignment here for generality */ |
545 | #define DLINFO_ITEMS 13 | 545 | #define DLINFO_ITEMS 13 |
546 | 546 | ||
547 | nitems = 1 + DLINFO_ITEMS + (k_platform ? 1 : 0); | 547 | nitems = 1 + DLINFO_ITEMS + (k_platform ? 1 : 0); |
548 | #ifdef DLINFO_ARCH_ITEMS | 548 | #ifdef DLINFO_ARCH_ITEMS |
549 | nitems += DLINFO_ARCH_ITEMS; | 549 | nitems += DLINFO_ARCH_ITEMS; |
550 | #endif | 550 | #endif |
551 | 551 | ||
552 | csp = sp; | 552 | csp = sp; |
553 | sp -= nitems * 2 * sizeof(unsigned long); | 553 | sp -= nitems * 2 * sizeof(unsigned long); |
554 | sp -= (bprm->envc + 1) * sizeof(char *); /* envv[] */ | 554 | sp -= (bprm->envc + 1) * sizeof(char *); /* envv[] */ |
555 | sp -= (bprm->argc + 1) * sizeof(char *); /* argv[] */ | 555 | sp -= (bprm->argc + 1) * sizeof(char *); /* argv[] */ |
556 | sp -= 1 * sizeof(unsigned long); /* argc */ | 556 | sp -= 1 * sizeof(unsigned long); /* argc */ |
557 | 557 | ||
558 | csp -= sp & 15UL; | 558 | csp -= sp & 15UL; |
559 | sp -= sp & 15UL; | 559 | sp -= sp & 15UL; |
560 | 560 | ||
561 | /* put the ELF interpreter info on the stack */ | 561 | /* put the ELF interpreter info on the stack */ |
562 | #define NEW_AUX_ENT(nr, id, val) \ | 562 | #define NEW_AUX_ENT(nr, id, val) \ |
563 | do { \ | 563 | do { \ |
564 | struct { unsigned long _id, _val; } __user *ent; \ | 564 | struct { unsigned long _id, _val; } __user *ent; \ |
565 | \ | 565 | \ |
566 | ent = (void __user *) csp; \ | 566 | ent = (void __user *) csp; \ |
567 | __put_user((id), &ent[nr]._id); \ | 567 | __put_user((id), &ent[nr]._id); \ |
568 | __put_user((val), &ent[nr]._val); \ | 568 | __put_user((val), &ent[nr]._val); \ |
569 | } while (0) | 569 | } while (0) |
570 | 570 | ||
571 | csp -= 2 * sizeof(unsigned long); | 571 | csp -= 2 * sizeof(unsigned long); |
572 | NEW_AUX_ENT(0, AT_NULL, 0); | 572 | NEW_AUX_ENT(0, AT_NULL, 0); |
573 | if (k_platform) { | 573 | if (k_platform) { |
574 | csp -= 2 * sizeof(unsigned long); | 574 | csp -= 2 * sizeof(unsigned long); |
575 | NEW_AUX_ENT(0, AT_PLATFORM, | 575 | NEW_AUX_ENT(0, AT_PLATFORM, |
576 | (elf_addr_t) (unsigned long) u_platform); | 576 | (elf_addr_t) (unsigned long) u_platform); |
577 | } | 577 | } |
578 | 578 | ||
579 | csp -= DLINFO_ITEMS * 2 * sizeof(unsigned long); | 579 | csp -= DLINFO_ITEMS * 2 * sizeof(unsigned long); |
580 | NEW_AUX_ENT( 0, AT_HWCAP, hwcap); | 580 | NEW_AUX_ENT( 0, AT_HWCAP, hwcap); |
581 | NEW_AUX_ENT( 1, AT_PAGESZ, PAGE_SIZE); | 581 | NEW_AUX_ENT( 1, AT_PAGESZ, PAGE_SIZE); |
582 | NEW_AUX_ENT( 2, AT_CLKTCK, CLOCKS_PER_SEC); | 582 | NEW_AUX_ENT( 2, AT_CLKTCK, CLOCKS_PER_SEC); |
583 | NEW_AUX_ENT( 3, AT_PHDR, exec_params->ph_addr); | 583 | NEW_AUX_ENT( 3, AT_PHDR, exec_params->ph_addr); |
584 | NEW_AUX_ENT( 4, AT_PHENT, sizeof(struct elf_phdr)); | 584 | NEW_AUX_ENT( 4, AT_PHENT, sizeof(struct elf_phdr)); |
585 | NEW_AUX_ENT( 5, AT_PHNUM, exec_params->hdr.e_phnum); | 585 | NEW_AUX_ENT( 5, AT_PHNUM, exec_params->hdr.e_phnum); |
586 | NEW_AUX_ENT( 6, AT_BASE, interp_params->elfhdr_addr); | 586 | NEW_AUX_ENT( 6, AT_BASE, interp_params->elfhdr_addr); |
587 | NEW_AUX_ENT( 7, AT_FLAGS, 0); | 587 | NEW_AUX_ENT( 7, AT_FLAGS, 0); |
588 | NEW_AUX_ENT( 8, AT_ENTRY, exec_params->entry_addr); | 588 | NEW_AUX_ENT( 8, AT_ENTRY, exec_params->entry_addr); |
589 | NEW_AUX_ENT( 9, AT_UID, (elf_addr_t) current->uid); | 589 | NEW_AUX_ENT( 9, AT_UID, (elf_addr_t) current->uid); |
590 | NEW_AUX_ENT(10, AT_EUID, (elf_addr_t) current->euid); | 590 | NEW_AUX_ENT(10, AT_EUID, (elf_addr_t) current->euid); |
591 | NEW_AUX_ENT(11, AT_GID, (elf_addr_t) current->gid); | 591 | NEW_AUX_ENT(11, AT_GID, (elf_addr_t) current->gid); |
592 | NEW_AUX_ENT(12, AT_EGID, (elf_addr_t) current->egid); | 592 | NEW_AUX_ENT(12, AT_EGID, (elf_addr_t) current->egid); |
593 | 593 | ||
594 | #ifdef ARCH_DLINFO | 594 | #ifdef ARCH_DLINFO |
595 | /* ARCH_DLINFO must come last so platform specific code can enforce | 595 | /* ARCH_DLINFO must come last so platform specific code can enforce |
596 | * special alignment requirements on the AUXV if necessary (eg. PPC). | 596 | * special alignment requirements on the AUXV if necessary (eg. PPC). |
597 | */ | 597 | */ |
598 | ARCH_DLINFO; | 598 | ARCH_DLINFO; |
599 | #endif | 599 | #endif |
600 | #undef NEW_AUX_ENT | 600 | #undef NEW_AUX_ENT |
601 | 601 | ||
602 | /* allocate room for argv[] and envv[] */ | 602 | /* allocate room for argv[] and envv[] */ |
603 | csp -= (bprm->envc + 1) * sizeof(elf_caddr_t); | 603 | csp -= (bprm->envc + 1) * sizeof(elf_caddr_t); |
604 | envp = (elf_caddr_t __user *) csp; | 604 | envp = (elf_caddr_t __user *) csp; |
605 | csp -= (bprm->argc + 1) * sizeof(elf_caddr_t); | 605 | csp -= (bprm->argc + 1) * sizeof(elf_caddr_t); |
606 | argv = (elf_caddr_t __user *) csp; | 606 | argv = (elf_caddr_t __user *) csp; |
607 | 607 | ||
608 | /* stack argc */ | 608 | /* stack argc */ |
609 | csp -= sizeof(unsigned long); | 609 | csp -= sizeof(unsigned long); |
610 | __put_user(bprm->argc, (unsigned long __user *) csp); | 610 | __put_user(bprm->argc, (unsigned long __user *) csp); |
611 | 611 | ||
612 | BUG_ON(csp != sp); | 612 | BUG_ON(csp != sp); |
613 | 613 | ||
614 | /* fill in the argv[] array */ | 614 | /* fill in the argv[] array */ |
615 | #ifdef CONFIG_MMU | 615 | #ifdef CONFIG_MMU |
616 | current->mm->arg_start = bprm->p; | 616 | current->mm->arg_start = bprm->p; |
617 | #else | 617 | #else |
618 | current->mm->arg_start = current->mm->start_stack - | 618 | current->mm->arg_start = current->mm->start_stack - |
619 | (MAX_ARG_PAGES * PAGE_SIZE - bprm->p); | 619 | (MAX_ARG_PAGES * PAGE_SIZE - bprm->p); |
620 | #endif | 620 | #endif |
621 | 621 | ||
622 | p = (char __user *) current->mm->arg_start; | 622 | p = (char __user *) current->mm->arg_start; |
623 | for (loop = bprm->argc; loop > 0; loop--) { | 623 | for (loop = bprm->argc; loop > 0; loop--) { |
624 | __put_user((elf_caddr_t) p, argv++); | 624 | __put_user((elf_caddr_t) p, argv++); |
625 | len = strnlen_user(p, PAGE_SIZE * MAX_ARG_PAGES); | 625 | len = strnlen_user(p, PAGE_SIZE * MAX_ARG_PAGES); |
626 | if (!len || len > PAGE_SIZE * MAX_ARG_PAGES) | 626 | if (!len || len > PAGE_SIZE * MAX_ARG_PAGES) |
627 | return -EINVAL; | 627 | return -EINVAL; |
628 | p += len; | 628 | p += len; |
629 | } | 629 | } |
630 | __put_user(NULL, argv); | 630 | __put_user(NULL, argv); |
631 | current->mm->arg_end = (unsigned long) p; | 631 | current->mm->arg_end = (unsigned long) p; |
632 | 632 | ||
633 | /* fill in the envv[] array */ | 633 | /* fill in the envv[] array */ |
634 | current->mm->env_start = (unsigned long) p; | 634 | current->mm->env_start = (unsigned long) p; |
635 | for (loop = bprm->envc; loop > 0; loop--) { | 635 | for (loop = bprm->envc; loop > 0; loop--) { |
636 | __put_user((elf_caddr_t)(unsigned long) p, envp++); | 636 | __put_user((elf_caddr_t)(unsigned long) p, envp++); |
637 | len = strnlen_user(p, PAGE_SIZE * MAX_ARG_PAGES); | 637 | len = strnlen_user(p, PAGE_SIZE * MAX_ARG_PAGES); |
638 | if (!len || len > PAGE_SIZE * MAX_ARG_PAGES) | 638 | if (!len || len > PAGE_SIZE * MAX_ARG_PAGES) |
639 | return -EINVAL; | 639 | return -EINVAL; |
640 | p += len; | 640 | p += len; |
641 | } | 641 | } |
642 | __put_user(NULL, envp); | 642 | __put_user(NULL, envp); |
643 | current->mm->env_end = (unsigned long) p; | 643 | current->mm->env_end = (unsigned long) p; |
644 | 644 | ||
645 | mm->start_stack = (unsigned long) sp; | 645 | mm->start_stack = (unsigned long) sp; |
646 | return 0; | 646 | return 0; |
647 | } | 647 | } |
648 | 648 | ||
649 | /*****************************************************************************/ | 649 | /*****************************************************************************/ |
650 | /* | 650 | /* |
651 | * transfer the program arguments and environment from the holding pages onto | 651 | * transfer the program arguments and environment from the holding pages onto |
652 | * the stack | 652 | * the stack |
653 | */ | 653 | */ |
654 | #ifndef CONFIG_MMU | 654 | #ifndef CONFIG_MMU |
655 | static int elf_fdpic_transfer_args_to_stack(struct linux_binprm *bprm, | 655 | static int elf_fdpic_transfer_args_to_stack(struct linux_binprm *bprm, |
656 | unsigned long *_sp) | 656 | unsigned long *_sp) |
657 | { | 657 | { |
658 | unsigned long index, stop, sp; | 658 | unsigned long index, stop, sp; |
659 | char *src; | 659 | char *src; |
660 | int ret = 0; | 660 | int ret = 0; |
661 | 661 | ||
662 | stop = bprm->p >> PAGE_SHIFT; | 662 | stop = bprm->p >> PAGE_SHIFT; |
663 | sp = *_sp; | 663 | sp = *_sp; |
664 | 664 | ||
665 | for (index = MAX_ARG_PAGES - 1; index >= stop; index--) { | 665 | for (index = MAX_ARG_PAGES - 1; index >= stop; index--) { |
666 | src = kmap(bprm->page[index]); | 666 | src = kmap(bprm->page[index]); |
667 | sp -= PAGE_SIZE; | 667 | sp -= PAGE_SIZE; |
668 | if (copy_to_user((void *) sp, src, PAGE_SIZE) != 0) | 668 | if (copy_to_user((void *) sp, src, PAGE_SIZE) != 0) |
669 | ret = -EFAULT; | 669 | ret = -EFAULT; |
670 | kunmap(bprm->page[index]); | 670 | kunmap(bprm->page[index]); |
671 | if (ret < 0) | 671 | if (ret < 0) |
672 | goto out; | 672 | goto out; |
673 | } | 673 | } |
674 | 674 | ||
675 | *_sp = (*_sp - (MAX_ARG_PAGES * PAGE_SIZE - bprm->p)) & ~15; | 675 | *_sp = (*_sp - (MAX_ARG_PAGES * PAGE_SIZE - bprm->p)) & ~15; |
676 | 676 | ||
677 | out: | 677 | out: |
678 | return ret; | 678 | return ret; |
679 | } | 679 | } |
680 | #endif | 680 | #endif |
681 | 681 | ||
682 | /*****************************************************************************/ | 682 | /*****************************************************************************/ |
683 | /* | 683 | /* |
684 | * load the appropriate binary image (executable or interpreter) into memory | 684 | * load the appropriate binary image (executable or interpreter) into memory |
685 | * - we assume no MMU is available | 685 | * - we assume no MMU is available |
686 | * - if no other PIC bits are set in params->hdr->e_flags | 686 | * - if no other PIC bits are set in params->hdr->e_flags |
687 | * - we assume that the LOADable segments in the binary are independently relocatable | 687 | * - we assume that the LOADable segments in the binary are independently relocatable |
688 | * - we assume R/O executable segments are shareable | 688 | * - we assume R/O executable segments are shareable |
689 | * - else | 689 | * - else |
690 | * - we assume the loadable parts of the image to require fixed displacement | 690 | * - we assume the loadable parts of the image to require fixed displacement |
691 | * - the image is not shareable | 691 | * - the image is not shareable |
692 | */ | 692 | */ |
static int elf_fdpic_map_file(struct elf_fdpic_params *params,
			      struct file *file,
			      struct mm_struct *mm,
			      const char *what)
{
	struct elf32_fdpic_loadmap *loadmap;
#ifdef CONFIG_MMU
	struct elf32_fdpic_loadseg *mseg;
#endif
	struct elf32_fdpic_loadseg *seg;
	struct elf32_phdr *phdr;
	unsigned long load_addr, stop;
	unsigned nloads, tmp;
	size_t size;
	int loop, ret;

	/* allocate a load map table
	 * - one elf32_fdpic_loadseg slot per PT_LOAD program header */
	nloads = 0;
	for (loop = 0; loop < params->hdr.e_phnum; loop++)
		if (params->phdrs[loop].p_type == PT_LOAD)
			nloads++;

	if (nloads == 0)
		return -ELIBBAD;

	size = sizeof(*loadmap) + nloads * sizeof(*seg);
	loadmap = kzalloc(size, GFP_KERNEL);
	if (!loadmap)
		return -ENOMEM;

	/* ownership of the loadmap passes to params; presumably the caller
	 * frees it on failure - TODO confirm against the callers */
	params->loadmap = loadmap;

	loadmap->version = ELF32_FDPIC_LOADMAP_VERSION;
	loadmap->nsegs = nloads;

	load_addr = params->load_addr;
	seg = loadmap->segs;

	/* map the requested LOADs into the memory space
	 * - the mapping helpers fill in loadmap->segs[] with the addresses
	 *   actually obtained */
	switch (params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) {
	case ELF_FDPIC_FLAG_CONSTDISP:
	case ELF_FDPIC_FLAG_CONTIGUOUS:
#ifndef CONFIG_MMU
		ret = elf_fdpic_map_file_constdisp_on_uclinux(params, file, mm);
		if (ret < 0)
			return ret;
		break;
#endif
	/* on MMU kernels CONSTDISP/CONTIGUOUS deliberately fall through to
	 * the direct-mmap strategy (the break above is compiled out) */
	default:
		ret = elf_fdpic_map_file_by_direct_mmap(params, file, mm);
		if (ret < 0)
			return ret;
		break;
	}

	/* map the entry point
	 * - translate e_entry from its link-time vaddr to the mapped address
	 *   via whichever segment contains it */
	if (params->hdr.e_entry) {
		seg = loadmap->segs;
		for (loop = loadmap->nsegs; loop > 0; loop--, seg++) {
			if (params->hdr.e_entry >= seg->p_vaddr &&
			    params->hdr.e_entry < seg->p_vaddr + seg->p_memsz) {
				params->entry_addr =
					(params->hdr.e_entry - seg->p_vaddr) +
					seg->addr;
				break;
			}
		}
	}

	/* determine where the program header table has wound up if mapped */
	stop = params->hdr.e_phoff;
	stop += params->hdr.e_phnum * sizeof (struct elf_phdr);
	phdr = params->phdrs;

	/* NOTE(review): 'loop' is reused by the nested segment scan below;
	 * this is only safe because the outer loop breaks unconditionally
	 * once a covering PT_LOAD has been examined */
	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
		if (phdr->p_type != PT_LOAD)
			continue;

		/* the PHDR table must lie wholly within this segment's
		 * file-backed extent to be considered mapped */
		if (phdr->p_offset > params->hdr.e_phoff ||
		    phdr->p_offset + phdr->p_filesz < stop)
			continue;

		seg = loadmap->segs;
		for (loop = loadmap->nsegs; loop > 0; loop--, seg++) {
			if (phdr->p_vaddr >= seg->p_vaddr &&
			    phdr->p_vaddr + phdr->p_filesz <=
			    seg->p_vaddr + seg->p_memsz) {
				params->ph_addr =
					(phdr->p_vaddr - seg->p_vaddr) +
					seg->addr +
					params->hdr.e_phoff - phdr->p_offset;
				break;
			}
		}
		break;
	}

	/* determine where the dynamic section has wound up if there is one
	 * (same 'loop' reuse caveat as above) */
	phdr = params->phdrs;
	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
		if (phdr->p_type != PT_DYNAMIC)
			continue;

		seg = loadmap->segs;
		for (loop = loadmap->nsegs; loop > 0; loop--, seg++) {
			if (phdr->p_vaddr >= seg->p_vaddr &&
			    phdr->p_vaddr + phdr->p_memsz <=
			    seg->p_vaddr + seg->p_memsz) {
				params->dynamic_addr =
					(phdr->p_vaddr - seg->p_vaddr) +
					seg->addr;

				/* check the dynamic section contains at least
				 * one item, and that the last item is a NULL
				 * entry */
				if (phdr->p_memsz == 0 ||
				    phdr->p_memsz % sizeof(Elf32_Dyn) != 0)
					goto dynamic_error;

				tmp = phdr->p_memsz / sizeof(Elf32_Dyn);
				if (((Elf32_Dyn *)
				     params->dynamic_addr)[tmp - 1].d_tag != 0)
					goto dynamic_error;
				break;
			}
		}
		break;
	}

	/* now elide adjacent segments in the load map on MMU linux
	 * - on uClinux the holes between may actually be filled with system
	 *   stuff or stuff from other processes
	 */
#ifdef CONFIG_MMU
	nloads = loadmap->nsegs;
	mseg = loadmap->segs;
	seg = mseg + 1;
	/* NOTE(review): 'seg' does not appear to advance on either path of
	 * this loop body as written here - verify against upstream source */
	for (loop = 1; loop < nloads; loop++) {
		/* see if we have a candidate for merging */
		if (seg->p_vaddr - mseg->p_vaddr == seg->addr - mseg->addr) {
			load_addr = PAGE_ALIGN(mseg->addr + mseg->p_memsz);
			if (load_addr == (seg->addr & PAGE_MASK)) {
				/* absorb the page-gap and the next segment
				 * into mseg, shrinking the map by one slot */
				mseg->p_memsz +=
					load_addr -
					(mseg->addr + mseg->p_memsz);
				mseg->p_memsz += seg->addr & ~PAGE_MASK;
				mseg->p_memsz += seg->p_memsz;
				loadmap->nsegs--;
				continue;
			}
		}

		/* not mergeable: compact the surviving segment downwards */
		mseg++;
		if (mseg != seg)
			*mseg = *seg;
	}
#endif

	kdebug("Mapped Object [%s]:", what);
	kdebug("- elfhdr : %lx", params->elfhdr_addr);
	kdebug("- entry : %lx", params->entry_addr);
	kdebug("- PHDR[] : %lx", params->ph_addr);
	kdebug("- DYNAMIC[]: %lx", params->dynamic_addr);
	seg = loadmap->segs;
	for (loop = 0; loop < loadmap->nsegs; loop++, seg++)
		kdebug("- LOAD[%d] : %08x-%08x [va=%x ms=%x]",
		       loop,
		       seg->addr, seg->addr + seg->p_memsz - 1,
		       seg->p_vaddr, seg->p_memsz);

	return 0;

dynamic_error:
	printk("ELF FDPIC %s with invalid DYNAMIC section (inode=%lu)\n",
	       what, file->f_path.dentry->d_inode->i_ino);
	return -ELIBBAD;
}
870 | 870 | ||
871 | /*****************************************************************************/ | 871 | /*****************************************************************************/ |
872 | /* | 872 | /* |
873 | * map a file with constant displacement under uClinux | 873 | * map a file with constant displacement under uClinux |
874 | */ | 874 | */ |
875 | #ifndef CONFIG_MMU | 875 | #ifndef CONFIG_MMU |
static int elf_fdpic_map_file_constdisp_on_uclinux(
	struct elf_fdpic_params *params,
	struct file *file,
	struct mm_struct *mm)
{
	struct elf32_fdpic_loadseg *seg;
	struct elf32_phdr *phdr;
	unsigned long load_addr, base = ULONG_MAX, top = 0, maddr = 0, mflags;
	loff_t fpos;
	int loop, ret;

	load_addr = params->load_addr;
	seg = params->loadmap->segs;

	/* determine the bounds of the contiguous overall allocation we must
	 * make */
	phdr = params->phdrs;
	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
		/* phdr == &params->phdrs[loop], so the two spellings below
		 * address the same header */
		if (params->phdrs[loop].p_type != PT_LOAD)
			continue;

		if (base > phdr->p_vaddr)
			base = phdr->p_vaddr;
		if (top < phdr->p_vaddr + phdr->p_memsz)
			top = phdr->p_vaddr + phdr->p_memsz;
	}

	/* allocate one big anon block for everything */
	mflags = MAP_PRIVATE;
	if (params->flags & ELF_FDPIC_FLAG_EXECUTABLE)
		mflags |= MAP_EXECUTABLE;

	down_write(&mm->mmap_sem);
	maddr = do_mmap(NULL, load_addr, top - base,
			PROT_READ | PROT_WRITE | PROT_EXEC, mflags, 0);
	up_write(&mm->mmap_sem);
	if (IS_ERR_VALUE(maddr))
		return (int) maddr;

	/* advance the hint for the next object so successive loads don't
	 * collide when the caller supplies explicit load addresses */
	if (load_addr != 0)
		load_addr += PAGE_ALIGN(top - base);

	/* and then load the file segments into it */
	phdr = params->phdrs;
	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
		if (params->phdrs[loop].p_type != PT_LOAD)
			continue;

		fpos = phdr->p_offset;

		/* record where this segment landed inside the big block */
		seg->addr = maddr + (phdr->p_vaddr - base);
		seg->p_vaddr = phdr->p_vaddr;
		seg->p_memsz = phdr->p_memsz;

		/* read the file contents directly into place (no-MMU, so the
		 * "user" address is directly addressable) */
		ret = file->f_op->read(file, (void *) seg->addr,
				       phdr->p_filesz, &fpos);
		if (ret < 0)
			return ret;

		/* map the ELF header address if in this segment */
		if (phdr->p_offset == 0)
			params->elfhdr_addr = seg->addr;

		/* clear any space allocated but not loaded (the BSS part) */
		if (phdr->p_filesz < phdr->p_memsz)
			clear_user((void *) (seg->addr + phdr->p_filesz),
				   phdr->p_memsz - phdr->p_filesz);

		if (mm) {
			/* track code/data extents; only the first executable
			 * and first non-executable segment set the starts */
			if (phdr->p_flags & PF_X) {
				if (!mm->start_code) {
					mm->start_code = seg->addr;
					mm->end_code = seg->addr +
						phdr->p_memsz;
				}
			} else if (!mm->start_data) {
				mm->start_data = seg->addr;
#ifndef CONFIG_MMU
				mm->end_data = seg->addr + phdr->p_memsz;
#endif
			}

#ifdef CONFIG_MMU
			if (seg->addr + phdr->p_memsz > mm->end_data)
				mm->end_data = seg->addr + phdr->p_memsz;
#endif
		}

		seg++;
	}

	return 0;
}
969 | #endif | 969 | #endif |
970 | 970 | ||
971 | /*****************************************************************************/ | 971 | /*****************************************************************************/ |
972 | /* | 972 | /* |
973 | * map a binary by direct mmap() of the individual PT_LOAD segments | 973 | * map a binary by direct mmap() of the individual PT_LOAD segments |
974 | */ | 974 | */ |
975 | static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params, | 975 | static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params, |
976 | struct file *file, | 976 | struct file *file, |
977 | struct mm_struct *mm) | 977 | struct mm_struct *mm) |
978 | { | 978 | { |
979 | struct elf32_fdpic_loadseg *seg; | 979 | struct elf32_fdpic_loadseg *seg; |
980 | struct elf32_phdr *phdr; | 980 | struct elf32_phdr *phdr; |
981 | unsigned long load_addr, delta_vaddr; | 981 | unsigned long load_addr, delta_vaddr; |
982 | int loop, dvset; | 982 | int loop, dvset; |
983 | 983 | ||
984 | load_addr = params->load_addr; | 984 | load_addr = params->load_addr; |
985 | delta_vaddr = 0; | 985 | delta_vaddr = 0; |
986 | dvset = 0; | 986 | dvset = 0; |
987 | 987 | ||
988 | seg = params->loadmap->segs; | 988 | seg = params->loadmap->segs; |
989 | 989 | ||
990 | /* deal with each load segment separately */ | 990 | /* deal with each load segment separately */ |
991 | phdr = params->phdrs; | 991 | phdr = params->phdrs; |
992 | for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) { | 992 | for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) { |
993 | unsigned long maddr, disp, excess, excess1; | 993 | unsigned long maddr, disp, excess, excess1; |
994 | int prot = 0, flags; | 994 | int prot = 0, flags; |
995 | 995 | ||
996 | if (phdr->p_type != PT_LOAD) | 996 | if (phdr->p_type != PT_LOAD) |
997 | continue; | 997 | continue; |
998 | 998 | ||
999 | kdebug("[LOAD] va=%lx of=%lx fs=%lx ms=%lx", | 999 | kdebug("[LOAD] va=%lx of=%lx fs=%lx ms=%lx", |
1000 | (unsigned long) phdr->p_vaddr, | 1000 | (unsigned long) phdr->p_vaddr, |
1001 | (unsigned long) phdr->p_offset, | 1001 | (unsigned long) phdr->p_offset, |
1002 | (unsigned long) phdr->p_filesz, | 1002 | (unsigned long) phdr->p_filesz, |
1003 | (unsigned long) phdr->p_memsz); | 1003 | (unsigned long) phdr->p_memsz); |
1004 | 1004 | ||
1005 | /* determine the mapping parameters */ | 1005 | /* determine the mapping parameters */ |
1006 | if (phdr->p_flags & PF_R) prot |= PROT_READ; | 1006 | if (phdr->p_flags & PF_R) prot |= PROT_READ; |
1007 | if (phdr->p_flags & PF_W) prot |= PROT_WRITE; | 1007 | if (phdr->p_flags & PF_W) prot |= PROT_WRITE; |
1008 | if (phdr->p_flags & PF_X) prot |= PROT_EXEC; | 1008 | if (phdr->p_flags & PF_X) prot |= PROT_EXEC; |
1009 | 1009 | ||
1010 | flags = MAP_PRIVATE | MAP_DENYWRITE; | 1010 | flags = MAP_PRIVATE | MAP_DENYWRITE; |
1011 | if (params->flags & ELF_FDPIC_FLAG_EXECUTABLE) | 1011 | if (params->flags & ELF_FDPIC_FLAG_EXECUTABLE) |
1012 | flags |= MAP_EXECUTABLE; | 1012 | flags |= MAP_EXECUTABLE; |
1013 | 1013 | ||
1014 | maddr = 0; | 1014 | maddr = 0; |
1015 | 1015 | ||
1016 | switch (params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) { | 1016 | switch (params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) { |
1017 | case ELF_FDPIC_FLAG_INDEPENDENT: | 1017 | case ELF_FDPIC_FLAG_INDEPENDENT: |
1018 | /* PT_LOADs are independently locatable */ | 1018 | /* PT_LOADs are independently locatable */ |
1019 | break; | 1019 | break; |
1020 | 1020 | ||
1021 | case ELF_FDPIC_FLAG_HONOURVADDR: | 1021 | case ELF_FDPIC_FLAG_HONOURVADDR: |
1022 | /* the specified virtual address must be honoured */ | 1022 | /* the specified virtual address must be honoured */ |
1023 | maddr = phdr->p_vaddr; | 1023 | maddr = phdr->p_vaddr; |
1024 | flags |= MAP_FIXED; | 1024 | flags |= MAP_FIXED; |
1025 | break; | 1025 | break; |
1026 | 1026 | ||
1027 | case ELF_FDPIC_FLAG_CONSTDISP: | 1027 | case ELF_FDPIC_FLAG_CONSTDISP: |
1028 | /* constant displacement | 1028 | /* constant displacement |
1029 | * - can be mapped anywhere, but must be mapped as a | 1029 | * - can be mapped anywhere, but must be mapped as a |
1030 | * unit | 1030 | * unit |
1031 | */ | 1031 | */ |
1032 | if (!dvset) { | 1032 | if (!dvset) { |
1033 | maddr = load_addr; | 1033 | maddr = load_addr; |
1034 | delta_vaddr = phdr->p_vaddr; | 1034 | delta_vaddr = phdr->p_vaddr; |
1035 | dvset = 1; | 1035 | dvset = 1; |
1036 | } else { | 1036 | } else { |
1037 | maddr = load_addr + phdr->p_vaddr - delta_vaddr; | 1037 | maddr = load_addr + phdr->p_vaddr - delta_vaddr; |
1038 | flags |= MAP_FIXED; | 1038 | flags |= MAP_FIXED; |
1039 | } | 1039 | } |
1040 | break; | 1040 | break; |
1041 | 1041 | ||
1042 | case ELF_FDPIC_FLAG_CONTIGUOUS: | 1042 | case ELF_FDPIC_FLAG_CONTIGUOUS: |
1043 | /* contiguity handled later */ | 1043 | /* contiguity handled later */ |
1044 | break; | 1044 | break; |
1045 | 1045 | ||
1046 | default: | 1046 | default: |
1047 | BUG(); | 1047 | BUG(); |
1048 | } | 1048 | } |
1049 | 1049 | ||
1050 | maddr &= PAGE_MASK; | 1050 | maddr &= PAGE_MASK; |
1051 | 1051 | ||
1052 | /* create the mapping */ | 1052 | /* create the mapping */ |
1053 | disp = phdr->p_vaddr & ~PAGE_MASK; | 1053 | disp = phdr->p_vaddr & ~PAGE_MASK; |
1054 | down_write(&mm->mmap_sem); | 1054 | down_write(&mm->mmap_sem); |
1055 | maddr = do_mmap(file, maddr, phdr->p_memsz + disp, prot, flags, | 1055 | maddr = do_mmap(file, maddr, phdr->p_memsz + disp, prot, flags, |
1056 | phdr->p_offset - disp); | 1056 | phdr->p_offset - disp); |
1057 | up_write(&mm->mmap_sem); | 1057 | up_write(&mm->mmap_sem); |
1058 | 1058 | ||
1059 | kdebug("mmap[%d] <file> sz=%lx pr=%x fl=%x of=%lx --> %08lx", | 1059 | kdebug("mmap[%d] <file> sz=%lx pr=%x fl=%x of=%lx --> %08lx", |
1060 | loop, phdr->p_memsz + disp, prot, flags, | 1060 | loop, phdr->p_memsz + disp, prot, flags, |
1061 | phdr->p_offset - disp, maddr); | 1061 | phdr->p_offset - disp, maddr); |
1062 | 1062 | ||
1063 | if (IS_ERR_VALUE(maddr)) | 1063 | if (IS_ERR_VALUE(maddr)) |
1064 | return (int) maddr; | 1064 | return (int) maddr; |
1065 | 1065 | ||
1066 | if ((params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) == | 1066 | if ((params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) == |
1067 | ELF_FDPIC_FLAG_CONTIGUOUS) | 1067 | ELF_FDPIC_FLAG_CONTIGUOUS) |
1068 | load_addr += PAGE_ALIGN(phdr->p_memsz + disp); | 1068 | load_addr += PAGE_ALIGN(phdr->p_memsz + disp); |
1069 | 1069 | ||
1070 | seg->addr = maddr + disp; | 1070 | seg->addr = maddr + disp; |
1071 | seg->p_vaddr = phdr->p_vaddr; | 1071 | seg->p_vaddr = phdr->p_vaddr; |
1072 | seg->p_memsz = phdr->p_memsz; | 1072 | seg->p_memsz = phdr->p_memsz; |
1073 | 1073 | ||
1074 | /* map the ELF header address if in this segment */ | 1074 | /* map the ELF header address if in this segment */ |
1075 | if (phdr->p_offset == 0) | 1075 | if (phdr->p_offset == 0) |
1076 | params->elfhdr_addr = seg->addr; | 1076 | params->elfhdr_addr = seg->addr; |
1077 | 1077 | ||
1078 | /* clear the bit between beginning of mapping and beginning of | 1078 | /* clear the bit between beginning of mapping and beginning of |
1079 | * PT_LOAD */ | 1079 | * PT_LOAD */ |
1080 | if (prot & PROT_WRITE && disp > 0) { | 1080 | if (prot & PROT_WRITE && disp > 0) { |
1081 | kdebug("clear[%d] ad=%lx sz=%lx", loop, maddr, disp); | 1081 | kdebug("clear[%d] ad=%lx sz=%lx", loop, maddr, disp); |
1082 | clear_user((void __user *) maddr, disp); | 1082 | clear_user((void __user *) maddr, disp); |
1083 | maddr += disp; | 1083 | maddr += disp; |
1084 | } | 1084 | } |
1085 | 1085 | ||
1086 | /* clear any space allocated but not loaded | 1086 | /* clear any space allocated but not loaded |
1087 | * - on uClinux we can just clear the lot | 1087 | * - on uClinux we can just clear the lot |
1088 | * - on MMU linux we'll get a SIGBUS beyond the last page | 1088 | * - on MMU linux we'll get a SIGBUS beyond the last page |
1089 | * extant in the file | 1089 | * extant in the file |
1090 | */ | 1090 | */ |
1091 | excess = phdr->p_memsz - phdr->p_filesz; | 1091 | excess = phdr->p_memsz - phdr->p_filesz; |
1092 | excess1 = PAGE_SIZE - ((maddr + phdr->p_filesz) & ~PAGE_MASK); | 1092 | excess1 = PAGE_SIZE - ((maddr + phdr->p_filesz) & ~PAGE_MASK); |
1093 | 1093 | ||
1094 | #ifdef CONFIG_MMU | 1094 | #ifdef CONFIG_MMU |
1095 | if (excess > excess1) { | 1095 | if (excess > excess1) { |
1096 | unsigned long xaddr = maddr + phdr->p_filesz + excess1; | 1096 | unsigned long xaddr = maddr + phdr->p_filesz + excess1; |
1097 | unsigned long xmaddr; | 1097 | unsigned long xmaddr; |
1098 | 1098 | ||
1099 | flags |= MAP_FIXED | MAP_ANONYMOUS; | 1099 | flags |= MAP_FIXED | MAP_ANONYMOUS; |
1100 | down_write(&mm->mmap_sem); | 1100 | down_write(&mm->mmap_sem); |
1101 | xmaddr = do_mmap(NULL, xaddr, excess - excess1, | 1101 | xmaddr = do_mmap(NULL, xaddr, excess - excess1, |
1102 | prot, flags, 0); | 1102 | prot, flags, 0); |
1103 | up_write(&mm->mmap_sem); | 1103 | up_write(&mm->mmap_sem); |
1104 | 1104 | ||
1105 | kdebug("mmap[%d] <anon>" | 1105 | kdebug("mmap[%d] <anon>" |
1106 | " ad=%lx sz=%lx pr=%x fl=%x of=0 --> %08lx", | 1106 | " ad=%lx sz=%lx pr=%x fl=%x of=0 --> %08lx", |
1107 | loop, xaddr, excess - excess1, prot, flags, | 1107 | loop, xaddr, excess - excess1, prot, flags, |
1108 | xmaddr); | 1108 | xmaddr); |
1109 | 1109 | ||
1110 | if (xmaddr != xaddr) | 1110 | if (xmaddr != xaddr) |
1111 | return -ENOMEM; | 1111 | return -ENOMEM; |
1112 | } | 1112 | } |
1113 | 1113 | ||
1114 | if (prot & PROT_WRITE && excess1 > 0) { | 1114 | if (prot & PROT_WRITE && excess1 > 0) { |
1115 | kdebug("clear[%d] ad=%lx sz=%lx", | 1115 | kdebug("clear[%d] ad=%lx sz=%lx", |
1116 | loop, maddr + phdr->p_filesz, excess1); | 1116 | loop, maddr + phdr->p_filesz, excess1); |
1117 | clear_user((void __user *) maddr + phdr->p_filesz, | 1117 | clear_user((void __user *) maddr + phdr->p_filesz, |
1118 | excess1); | 1118 | excess1); |
1119 | } | 1119 | } |
1120 | 1120 | ||
1121 | #else | 1121 | #else |
1122 | if (excess > 0) { | 1122 | if (excess > 0) { |
1123 | kdebug("clear[%d] ad=%lx sz=%lx", | 1123 | kdebug("clear[%d] ad=%lx sz=%lx", |
1124 | loop, maddr + phdr->p_filesz, excess); | 1124 | loop, maddr + phdr->p_filesz, excess); |
1125 | clear_user((void *) maddr + phdr->p_filesz, excess); | 1125 | clear_user((void *) maddr + phdr->p_filesz, excess); |
1126 | } | 1126 | } |
1127 | #endif | 1127 | #endif |
1128 | 1128 | ||
1129 | if (mm) { | 1129 | if (mm) { |
1130 | if (phdr->p_flags & PF_X) { | 1130 | if (phdr->p_flags & PF_X) { |
1131 | if (!mm->start_code) { | 1131 | if (!mm->start_code) { |
1132 | mm->start_code = maddr; | 1132 | mm->start_code = maddr; |
1133 | mm->end_code = maddr + phdr->p_memsz; | 1133 | mm->end_code = maddr + phdr->p_memsz; |
1134 | } | 1134 | } |
1135 | } else if (!mm->start_data) { | 1135 | } else if (!mm->start_data) { |
1136 | mm->start_data = maddr; | 1136 | mm->start_data = maddr; |
1137 | mm->end_data = maddr + phdr->p_memsz; | 1137 | mm->end_data = maddr + phdr->p_memsz; |
1138 | } | 1138 | } |
1139 | } | 1139 | } |
1140 | 1140 | ||
1141 | seg++; | 1141 | seg++; |
1142 | } | 1142 | } |
1143 | 1143 | ||
1144 | return 0; | 1144 | return 0; |
1145 | } | 1145 | } |
1146 | 1146 | ||
1147 | /*****************************************************************************/ | 1147 | /*****************************************************************************/ |
1148 | /* | 1148 | /* |
1149 | * ELF-FDPIC core dumper | 1149 | * ELF-FDPIC core dumper |
1150 | * | 1150 | * |
1151 | * Modelled on fs/exec.c:aout_core_dump() | 1151 | * Modelled on fs/exec.c:aout_core_dump() |
1152 | * Jeremy Fitzhardinge <jeremy@sw.oz.au> | 1152 | * Jeremy Fitzhardinge <jeremy@sw.oz.au> |
1153 | * | 1153 | * |
1154 | * Modelled on fs/binfmt_elf.c core dumper | 1154 | * Modelled on fs/binfmt_elf.c core dumper |
1155 | */ | 1155 | */ |
1156 | #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) | 1156 | #if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) |
1157 | 1157 | ||
1158 | /* | 1158 | /* |
1159 | * These are the only things you should do on a core-file: use only these | 1159 | * These are the only things you should do on a core-file: use only these |
1160 | * functions to write out all the necessary info. | 1160 | * functions to write out all the necessary info. |
1161 | */ | 1161 | */ |
1162 | static int dump_write(struct file *file, const void *addr, int nr) | 1162 | static int dump_write(struct file *file, const void *addr, int nr) |
1163 | { | 1163 | { |
1164 | return file->f_op->write(file, addr, nr, &file->f_pos) == nr; | 1164 | return file->f_op->write(file, addr, nr, &file->f_pos) == nr; |
1165 | } | 1165 | } |
1166 | 1166 | ||
1167 | static int dump_seek(struct file *file, loff_t off) | 1167 | static int dump_seek(struct file *file, loff_t off) |
1168 | { | 1168 | { |
1169 | if (file->f_op->llseek) { | 1169 | if (file->f_op->llseek) { |
1170 | if (file->f_op->llseek(file, off, SEEK_SET) != off) | 1170 | if (file->f_op->llseek(file, off, SEEK_SET) != off) |
1171 | return 0; | 1171 | return 0; |
1172 | } else { | 1172 | } else { |
1173 | file->f_pos = off; | 1173 | file->f_pos = off; |
1174 | } | 1174 | } |
1175 | return 1; | 1175 | return 1; |
1176 | } | 1176 | } |
1177 | 1177 | ||
1178 | /* | 1178 | /* |
1179 | * Decide whether a segment is worth dumping; default is yes to be | 1179 | * Decide whether a segment is worth dumping; default is yes to be |
1180 | * sure (missing info is worse than too much; etc). | 1180 | * sure (missing info is worse than too much; etc). |
1181 | * Personally I'd include everything, and use the coredump limit... | 1181 | * Personally I'd include everything, and use the coredump limit... |
1182 | * | 1182 | * |
1183 | * I think we should skip something. But I am not sure how. H.J. | 1183 | * I think we should skip something. But I am not sure how. H.J. |
1184 | */ | 1184 | */ |
1185 | static int maydump(struct vm_area_struct *vma) | 1185 | static int maydump(struct vm_area_struct *vma) |
1186 | { | 1186 | { |
1187 | /* Do not dump I/O mapped devices or special mappings */ | 1187 | /* Do not dump I/O mapped devices or special mappings */ |
1188 | if (vma->vm_flags & (VM_IO | VM_RESERVED)) { | 1188 | if (vma->vm_flags & (VM_IO | VM_RESERVED)) { |
1189 | kdcore("%08lx: %08lx: no (IO)", vma->vm_start, vma->vm_flags); | 1189 | kdcore("%08lx: %08lx: no (IO)", vma->vm_start, vma->vm_flags); |
1190 | return 0; | 1190 | return 0; |
1191 | } | 1191 | } |
1192 | 1192 | ||
1193 | /* If we may not read the contents, don't allow us to dump | 1193 | /* If we may not read the contents, don't allow us to dump |
1194 | * them either. "dump_write()" can't handle it anyway. | 1194 | * them either. "dump_write()" can't handle it anyway. |
1195 | */ | 1195 | */ |
1196 | if (!(vma->vm_flags & VM_READ)) { | 1196 | if (!(vma->vm_flags & VM_READ)) { |
1197 | kdcore("%08lx: %08lx: no (!read)", vma->vm_start, vma->vm_flags); | 1197 | kdcore("%08lx: %08lx: no (!read)", vma->vm_start, vma->vm_flags); |
1198 | return 0; | 1198 | return 0; |
1199 | } | 1199 | } |
1200 | 1200 | ||
1201 | /* Dump shared memory only if mapped from an anonymous file. */ | 1201 | /* Dump shared memory only if mapped from an anonymous file. */ |
1202 | if (vma->vm_flags & VM_SHARED) { | 1202 | if (vma->vm_flags & VM_SHARED) { |
1203 | if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0) { | 1203 | if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0) { |
1204 | kdcore("%08lx: %08lx: no (share)", vma->vm_start, vma->vm_flags); | 1204 | kdcore("%08lx: %08lx: no (share)", vma->vm_start, vma->vm_flags); |
1205 | return 1; | 1205 | return 1; |
1206 | } | 1206 | } |
1207 | 1207 | ||
1208 | kdcore("%08lx: %08lx: no (share)", vma->vm_start, vma->vm_flags); | 1208 | kdcore("%08lx: %08lx: no (share)", vma->vm_start, vma->vm_flags); |
1209 | return 0; | 1209 | return 0; |
1210 | } | 1210 | } |
1211 | 1211 | ||
1212 | #ifdef CONFIG_MMU | 1212 | #ifdef CONFIG_MMU |
1213 | /* If it hasn't been written to, don't write it out */ | 1213 | /* If it hasn't been written to, don't write it out */ |
1214 | if (!vma->anon_vma) { | 1214 | if (!vma->anon_vma) { |
1215 | kdcore("%08lx: %08lx: no (!anon)", vma->vm_start, vma->vm_flags); | 1215 | kdcore("%08lx: %08lx: no (!anon)", vma->vm_start, vma->vm_flags); |
1216 | return 0; | 1216 | return 0; |
1217 | } | 1217 | } |
1218 | #endif | 1218 | #endif |
1219 | 1219 | ||
1220 | kdcore("%08lx: %08lx: yes", vma->vm_start, vma->vm_flags); | 1220 | kdcore("%08lx: %08lx: yes", vma->vm_start, vma->vm_flags); |
1221 | return 1; | 1221 | return 1; |
1222 | } | 1222 | } |
1223 | 1223 | ||
/* An ELF note in memory */
struct memelfnote
{
	const char *name;	/* note owner name, e.g. "CORE" */
	int type;		/* note type (NT_* constant) */
	unsigned int datasz;	/* size of the payload in bytes */
	void *data;		/* payload (referenced, not copied) */
};
1232 | 1232 | ||
1233 | static int notesize(struct memelfnote *en) | 1233 | static int notesize(struct memelfnote *en) |
1234 | { | 1234 | { |
1235 | int sz; | 1235 | int sz; |
1236 | 1236 | ||
1237 | sz = sizeof(struct elf_note); | 1237 | sz = sizeof(struct elf_note); |
1238 | sz += roundup(strlen(en->name) + 1, 4); | 1238 | sz += roundup(strlen(en->name) + 1, 4); |
1239 | sz += roundup(en->datasz, 4); | 1239 | sz += roundup(en->datasz, 4); |
1240 | 1240 | ||
1241 | return sz; | 1241 | return sz; |
1242 | } | 1242 | } |
1243 | 1243 | ||
/* #define DEBUG */

/* note-writing helpers used by writenote(): abort with failure (return 0)
 * if the underlying write or seek fails */
#define DUMP_WRITE(addr, nr) \
	do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
#define DUMP_SEEK(off) \
	do { if (!dump_seek(file, (off))) return 0; } while(0)
1250 | 1250 | ||
/* write one ELF note to the core file: the elf_note header, the padded
 * name, then the padded descriptor data; returns 1 on success, 0 on a
 * short write or failed seek (via the DUMP_* macros above) */
static int writenote(struct memelfnote *men, struct file *file)
{
	struct elf_note en;

	en.n_namesz = strlen(men->name) + 1;	/* n_namesz includes the NUL */
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);
	/* XXX - cast from long long to long to avoid need for libgcc.a */
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
	DUMP_WRITE(men->data, men->datasz);
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */

	return 1;
}
#undef DUMP_WRITE
#undef DUMP_SEEK

/* redefined helpers for the core dumper proper: account the bytes written
 * against 'size'/'limit' (locals of the enclosing function) and jump to
 * end_coredump on failure instead of returning */
#define DUMP_WRITE(addr, nr) \
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off) \
	if (!dump_seek(file, (off))) \
		goto end_coredump;
1277 | 1277 | ||
/* fill in the ELF file header for an FDPIC core dump; 'segs' is the number
 * of program headers that will follow immediately after this header */
static inline void fill_elf_fdpic_header(struct elfhdr *elf, int segs)
{
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);	/* phdrs follow the header */
	elf->e_shoff = 0;			/* core files carry no sections */
	elf->e_flags = ELF_FDPIC_CORE_EFLAGS;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;
	return;
}
1302 | 1302 | ||
/* fill in a program header describing the PT_NOTE segment: 'sz' bytes of
 * note data at file offset 'offset', occupying no memory (p_memsz = 0) */
static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
	return;
}
1315 | 1315 | ||
1316 | static inline void fill_note(struct memelfnote *note, const char *name, int type, | 1316 | static inline void fill_note(struct memelfnote *note, const char *name, int type, |
1317 | unsigned int sz, void *data) | 1317 | unsigned int sz, void *data) |
1318 | { | 1318 | { |
1319 | note->name = name; | 1319 | note->name = name; |
1320 | note->type = type; | 1320 | note->type = type; |
1321 | note->datasz = sz; | 1321 | note->datasz = sz; |
1322 | note->data = data; | 1322 | note->data = data; |
1323 | return; | 1323 | return; |
1324 | } | 1324 | } |
1325 | 1325 | ||
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up seperately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
			  struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = p->pid;
	prstatus->pr_ppid = p->parent->pid;
	prstatus->pr_pgrp = process_group(p);
	prstatus->pr_sid = process_session(p);
	if (thread_group_leader(p)) {
		/*
		 * This is the record for the group leader.  Add in the
		 * cumulative times of previous dead threads.  This total
		 * won't include the time of each live thread whose state
		 * is included in the core dump.  The final total reported
		 * to our parent process when it calls wait4 will include
		 * those sums as well as the little bit more time it takes
		 * this and each other thread to finish dying after the
		 * core dump synchronization phase.
		 */
		cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
				   &prstatus->pr_utime);
		cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
				   &prstatus->pr_stime);
	} else {
		/* non-leader thread: report its own times only */
		cputime_to_timeval(p->utime, &prstatus->pr_utime);
		cputime_to_timeval(p->stime, &prstatus->pr_stime);
	}
	cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
	cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);

	/* FDPIC-specific: record the load maps of the executable and its
	 * interpreter alongside the usual prstatus fields */
	prstatus->pr_exec_fdpic_loadmap = p->mm->context.exec_fdpic_loadmap;
	prstatus->pr_interp_fdpic_loadmap = p->mm->context.interp_fdpic_loadmap;
}
1365 | 1365 | ||
/* fill up the NT_PRPSINFO note payload from the task and its mm;
 * returns 0 on success or -EFAULT if the argument list cannot be read
 * from user space */
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ - 1;	/* leave room for the terminating NUL */
	if (copy_from_user(&psinfo->pr_psargs,
			   (const char __user *) mm->arg_start, len))
		return -EFAULT;
	/* the argv strings are NUL-separated in memory; join with spaces */
	for (i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = p->pid;
	psinfo->pr_ppid = p->parent->pid;
	psinfo->pr_pgrp = process_group(p);
	psinfo->pr_sid = process_session(p);

	/* encode the task state as an index plus a single status letter;
	 * states beyond the known set are shown as '.' */
	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	SET_UID(psinfo->pr_uid, p->uid);
	SET_GID(psinfo->pr_gid, p->gid);
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}
1402 | 1402 | ||
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;		/* link in the dumper's thread list */
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;	/* the thread this record describes */
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* NT_PRXFPREG */
#endif
	struct memelfnote notes[3];	/* filled by elf_dump_thread_status() */
	int num_notes;			/* how many of notes[] are valid */
};
1416 | 1416 | ||
/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then create
 * a single section for them in the final core file.
 *
 * Fills t->notes[]/t->num_notes and returns the total on-file size of the
 * notes generated for this thread.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	struct task_struct *p = t->thread;
	int sz = 0;

	t->num_notes = 0;

	/* note 0: NT_PRSTATUS, always present */
	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
		  &t->prstatus);
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	/* note 1: NT_PRFPREG, only when the task has FPU state to report */
	t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu);
	if (t->prstatus.pr_fpvalid) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
			  &t->fpu);
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	/* note 2: NT_PRXFPREG, extended FP state where the arch provides it */
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu),
			  &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}
1455 | 1455 | ||
/*
 * dump the segments for an MMU process
 *
 * Walks every dumpable VMA a page at a time, writing present pages and
 * representing absent or zero pages as holes (seek forward) in the core
 * file.  Returns 0 on success, -EIO on a failed write, -EFBIG when the
 * core limit is exceeded.
 */
#ifdef CONFIG_MMU
static int elf_fdpic_dump_segments(struct file *file, struct mm_struct *mm,
				   size_t *size, unsigned long *limit)
{
	struct vm_area_struct *vma;

	for (vma = current->mm->mmap; vma; vma = vma->vm_next) {
		unsigned long addr;

		if (!maydump(vma))
			continue;

		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE
		     ) {
			struct vm_area_struct *vma;
			struct page *page;

			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
					   &page, &vma) <= 0) {
				/* page not obtainable: emit a hole */
				DUMP_SEEK(file->f_pos + PAGE_SIZE);
			}
			else if (page == ZERO_PAGE(addr)) {
				/* drop the reference *before* DUMP_SEEK: the
				 * macro can goto end_coredump (e.g. full
				 * filesystem) and would otherwise leak the
				 * page */
				page_cache_release(page);
				DUMP_SEEK(file->f_pos + PAGE_SIZE);
			}
			else {
				void *kaddr;

				flush_cache_page(vma, addr, page_to_pfn(page));
				kaddr = kmap(page);
				if ((*size += PAGE_SIZE) > *limit ||
				    !dump_write(file, kaddr, PAGE_SIZE)
				    ) {
					kunmap(page);
					page_cache_release(page);
					return -EIO;
				}
				kunmap(page);
				page_cache_release(page);
			}
		}
	}

	return 0;

end_coredump:
	/* DUMP_SEEK failure lands here: treat as core-size limit exceeded */
	return -EFBIG;
}
#endif
1510 | 1510 | ||
/*
 * dump the segments for a NOMMU process
 *
 * Without an MMU every region is directly addressable, so each dumpable
 * VMA is written out in one piece.  Returns 0 on success, -EFBIG when
 * the core limit is exceeded, -EIO on a failed write.
 */
#ifndef CONFIG_MMU
static int elf_fdpic_dump_segments(struct file *file, struct mm_struct *mm,
				   size_t *size, unsigned long *limit)
{
	struct vm_list_struct *vml;

	for (vml = current->mm->context.vmlist; vml; vml = vml->next) {
		struct vm_area_struct *vma = vml->vma;

		if (!maydump(vma))
			continue;

		if ((*size += PAGE_SIZE) > *limit)
			return -EFBIG;

		if (!dump_write(file, (void *) vma->vm_start,
				vma->vm_end - vma->vm_start))
			return -EIO;
	}

	return 0;
}
#endif
1537 | 1537 | ||
1538 | /* | 1538 | /* |
1539 | * Actual dumper | 1539 | * Actual dumper |
1540 | * | 1540 | * |
1541 | * This is a two-pass process; first we find the offsets of the bits, | 1541 | * This is a two-pass process; first we find the offsets of the bits, |
1542 | * and then they are actually written out. If we run out of core limit | 1542 | * and then they are actually written out. If we run out of core limit |
1543 | * we just truncate. | 1543 | * we just truncate. |
1544 | */ | 1544 | */ |
1545 | static int elf_fdpic_core_dump(long signr, struct pt_regs *regs, | 1545 | static int elf_fdpic_core_dump(long signr, struct pt_regs *regs, |
1546 | struct file *file) | 1546 | struct file *file) |
1547 | { | 1547 | { |
1548 | #define NUM_NOTES 6 | 1548 | #define NUM_NOTES 6 |
1549 | int has_dumped = 0; | 1549 | int has_dumped = 0; |
1550 | mm_segment_t fs; | 1550 | mm_segment_t fs; |
1551 | int segs; | 1551 | int segs; |
1552 | size_t size = 0; | 1552 | size_t size = 0; |
1553 | int i; | 1553 | int i; |
1554 | struct vm_area_struct *vma; | 1554 | struct vm_area_struct *vma; |
1555 | struct elfhdr *elf = NULL; | 1555 | struct elfhdr *elf = NULL; |
1556 | loff_t offset = 0, dataoff; | 1556 | loff_t offset = 0, dataoff; |
1557 | unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur; | 1557 | unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur; |
1558 | int numnote; | 1558 | int numnote; |
1559 | struct memelfnote *notes = NULL; | 1559 | struct memelfnote *notes = NULL; |
1560 | struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */ | 1560 | struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */ |
1561 | struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */ | 1561 | struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */ |
1562 | struct task_struct *g, *p; | 1562 | struct task_struct *g, *p; |
1563 | LIST_HEAD(thread_list); | 1563 | LIST_HEAD(thread_list); |
1564 | struct list_head *t; | 1564 | struct list_head *t; |
1565 | elf_fpregset_t *fpu = NULL; | 1565 | elf_fpregset_t *fpu = NULL; |
1566 | #ifdef ELF_CORE_COPY_XFPREGS | 1566 | #ifdef ELF_CORE_COPY_XFPREGS |
1567 | elf_fpxregset_t *xfpu = NULL; | 1567 | elf_fpxregset_t *xfpu = NULL; |
1568 | #endif | 1568 | #endif |
1569 | int thread_status_size = 0; | 1569 | int thread_status_size = 0; |
1570 | #ifndef CONFIG_MMU | 1570 | #ifndef CONFIG_MMU |
1571 | struct vm_list_struct *vml; | 1571 | struct vm_list_struct *vml; |
1572 | #endif | 1572 | #endif |
1573 | elf_addr_t *auxv; | 1573 | elf_addr_t *auxv; |
1574 | 1574 | ||
1575 | /* | 1575 | /* |
1576 | * We no longer stop all VM operations. | 1576 | * We no longer stop all VM operations. |
1577 | * | 1577 | * |
1578 | * This is because those proceses that could possibly change map_count | 1578 | * This is because those proceses that could possibly change map_count |
1579 | * or the mmap / vma pages are now blocked in do_exit on current | 1579 | * or the mmap / vma pages are now blocked in do_exit on current |
1580 | * finishing this core dump. | 1580 | * finishing this core dump. |
1581 | * | 1581 | * |
1582 | * Only ptrace can touch these memory addresses, but it doesn't change | 1582 | * Only ptrace can touch these memory addresses, but it doesn't change |
1583 | * the map_count or the pages allocated. So no possibility of crashing | 1583 | * the map_count or the pages allocated. So no possibility of crashing |
1584 | * exists while dumping the mm->vm_next areas to the core file. | 1584 | * exists while dumping the mm->vm_next areas to the core file. |
1585 | */ | 1585 | */ |
1586 | 1586 | ||
1587 | /* alloc memory for large data structures: too large to be on stack */ | 1587 | /* alloc memory for large data structures: too large to be on stack */ |
1588 | elf = kmalloc(sizeof(*elf), GFP_KERNEL); | 1588 | elf = kmalloc(sizeof(*elf), GFP_KERNEL); |
1589 | if (!elf) | 1589 | if (!elf) |
1590 | goto cleanup; | 1590 | goto cleanup; |
1591 | prstatus = kzalloc(sizeof(*prstatus), GFP_KERNEL); | 1591 | prstatus = kzalloc(sizeof(*prstatus), GFP_KERNEL); |
1592 | if (!prstatus) | 1592 | if (!prstatus) |
1593 | goto cleanup; | 1593 | goto cleanup; |
1594 | psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL); | 1594 | psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL); |
1595 | if (!psinfo) | 1595 | if (!psinfo) |
1596 | goto cleanup; | 1596 | goto cleanup; |
1597 | notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL); | 1597 | notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL); |
1598 | if (!notes) | 1598 | if (!notes) |
1599 | goto cleanup; | 1599 | goto cleanup; |
1600 | fpu = kmalloc(sizeof(*fpu), GFP_KERNEL); | 1600 | fpu = kmalloc(sizeof(*fpu), GFP_KERNEL); |
1601 | if (!fpu) | 1601 | if (!fpu) |
1602 | goto cleanup; | 1602 | goto cleanup; |
1603 | #ifdef ELF_CORE_COPY_XFPREGS | 1603 | #ifdef ELF_CORE_COPY_XFPREGS |
1604 | xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL); | 1604 | xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL); |
1605 | if (!xfpu) | 1605 | if (!xfpu) |
1606 | goto cleanup; | 1606 | goto cleanup; |
1607 | #endif | 1607 | #endif |
1608 | 1608 | ||
1609 | if (signr) { | 1609 | if (signr) { |
1610 | struct elf_thread_status *tmp; | 1610 | struct elf_thread_status *tmp; |
1611 | rcu_read_lock(); | 1611 | rcu_read_lock(); |
1612 | do_each_thread(g,p) | 1612 | do_each_thread(g,p) |
1613 | if (current->mm == p->mm && current != p) { | 1613 | if (current->mm == p->mm && current != p) { |
1614 | tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC); | 1614 | tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC); |
1615 | if (!tmp) { | 1615 | if (!tmp) { |
1616 | rcu_read_unlock(); | 1616 | rcu_read_unlock(); |
1617 | goto cleanup; | 1617 | goto cleanup; |
1618 | } | 1618 | } |
1619 | tmp->thread = p; | 1619 | tmp->thread = p; |
1620 | list_add(&tmp->list, &thread_list); | 1620 | list_add(&tmp->list, &thread_list); |
1621 | } | 1621 | } |
1622 | while_each_thread(g,p); | 1622 | while_each_thread(g,p); |
1623 | rcu_read_unlock(); | 1623 | rcu_read_unlock(); |
1624 | list_for_each(t, &thread_list) { | 1624 | list_for_each(t, &thread_list) { |
1625 | struct elf_thread_status *tmp; | 1625 | struct elf_thread_status *tmp; |
1626 | int sz; | 1626 | int sz; |
1627 | 1627 | ||
1628 | tmp = list_entry(t, struct elf_thread_status, list); | 1628 | tmp = list_entry(t, struct elf_thread_status, list); |
1629 | sz = elf_dump_thread_status(signr, tmp); | 1629 | sz = elf_dump_thread_status(signr, tmp); |
1630 | thread_status_size += sz; | 1630 | thread_status_size += sz; |
1631 | } | 1631 | } |
1632 | } | 1632 | } |
1633 | 1633 | ||
1634 | /* now collect the dump for the current */ | 1634 | /* now collect the dump for the current */ |
1635 | fill_prstatus(prstatus, current, signr); | 1635 | fill_prstatus(prstatus, current, signr); |
1636 | elf_core_copy_regs(&prstatus->pr_reg, regs); | 1636 | elf_core_copy_regs(&prstatus->pr_reg, regs); |
1637 | 1637 | ||
1638 | #ifdef CONFIG_MMU | 1638 | #ifdef CONFIG_MMU |
1639 | segs = current->mm->map_count; | 1639 | segs = current->mm->map_count; |
1640 | #else | 1640 | #else |
1641 | segs = 0; | 1641 | segs = 0; |
1642 | for (vml = current->mm->context.vmlist; vml; vml = vml->next) | 1642 | for (vml = current->mm->context.vmlist; vml; vml = vml->next) |
1643 | segs++; | 1643 | segs++; |
1644 | #endif | 1644 | #endif |
1645 | #ifdef ELF_CORE_EXTRA_PHDRS | 1645 | #ifdef ELF_CORE_EXTRA_PHDRS |
1646 | segs += ELF_CORE_EXTRA_PHDRS; | 1646 | segs += ELF_CORE_EXTRA_PHDRS; |
1647 | #endif | 1647 | #endif |
1648 | 1648 | ||
1649 | /* Set up header */ | 1649 | /* Set up header */ |
1650 | fill_elf_fdpic_header(elf, segs + 1); /* including notes section */ | 1650 | fill_elf_fdpic_header(elf, segs + 1); /* including notes section */ |
1651 | 1651 | ||
1652 | has_dumped = 1; | 1652 | has_dumped = 1; |
1653 | current->flags |= PF_DUMPCORE; | 1653 | current->flags |= PF_DUMPCORE; |
1654 | 1654 | ||
1655 | /* | 1655 | /* |
1656 | * Set up the notes in similar form to SVR4 core dumps made | 1656 | * Set up the notes in similar form to SVR4 core dumps made |
1657 | * with info from their /proc. | 1657 | * with info from their /proc. |
1658 | */ | 1658 | */ |
1659 | 1659 | ||
1660 | fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus); | 1660 | fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus); |
1661 | fill_psinfo(psinfo, current->group_leader, current->mm); | 1661 | fill_psinfo(psinfo, current->group_leader, current->mm); |
1662 | fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo); | 1662 | fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo); |
1663 | 1663 | ||
1664 | numnote = 2; | 1664 | numnote = 2; |
1665 | 1665 | ||
1666 | auxv = (elf_addr_t *) current->mm->saved_auxv; | 1666 | auxv = (elf_addr_t *) current->mm->saved_auxv; |
1667 | 1667 | ||
1668 | i = 0; | 1668 | i = 0; |
1669 | do | 1669 | do |
1670 | i += 2; | 1670 | i += 2; |
1671 | while (auxv[i - 2] != AT_NULL); | 1671 | while (auxv[i - 2] != AT_NULL); |
1672 | fill_note(¬es[numnote++], "CORE", NT_AUXV, | 1672 | fill_note(¬es[numnote++], "CORE", NT_AUXV, |
1673 | i * sizeof(elf_addr_t), auxv); | 1673 | i * sizeof(elf_addr_t), auxv); |
1674 | 1674 | ||
1675 | /* Try to dump the FPU. */ | 1675 | /* Try to dump the FPU. */ |
1676 | if ((prstatus->pr_fpvalid = | 1676 | if ((prstatus->pr_fpvalid = |
1677 | elf_core_copy_task_fpregs(current, regs, fpu))) | 1677 | elf_core_copy_task_fpregs(current, regs, fpu))) |
1678 | fill_note(notes + numnote++, | 1678 | fill_note(notes + numnote++, |
1679 | "CORE", NT_PRFPREG, sizeof(*fpu), fpu); | 1679 | "CORE", NT_PRFPREG, sizeof(*fpu), fpu); |
1680 | #ifdef ELF_CORE_COPY_XFPREGS | 1680 | #ifdef ELF_CORE_COPY_XFPREGS |
1681 | if (elf_core_copy_task_xfpregs(current, xfpu)) | 1681 | if (elf_core_copy_task_xfpregs(current, xfpu)) |
1682 | fill_note(notes + numnote++, | 1682 | fill_note(notes + numnote++, |
1683 | "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu); | 1683 | "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu); |
1684 | #endif | 1684 | #endif |
1685 | 1685 | ||
1686 | fs = get_fs(); | 1686 | fs = get_fs(); |
1687 | set_fs(KERNEL_DS); | 1687 | set_fs(KERNEL_DS); |
1688 | 1688 | ||
1689 | DUMP_WRITE(elf, sizeof(*elf)); | 1689 | DUMP_WRITE(elf, sizeof(*elf)); |
1690 | offset += sizeof(*elf); /* Elf header */ | 1690 | offset += sizeof(*elf); /* Elf header */ |
1691 | offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers */ | 1691 | offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers */ |
1692 | 1692 | ||
1693 | /* Write notes phdr entry */ | 1693 | /* Write notes phdr entry */ |
1694 | { | 1694 | { |
1695 | struct elf_phdr phdr; | 1695 | struct elf_phdr phdr; |
1696 | int sz = 0; | 1696 | int sz = 0; |
1697 | 1697 | ||
1698 | for (i = 0; i < numnote; i++) | 1698 | for (i = 0; i < numnote; i++) |
1699 | sz += notesize(notes + i); | 1699 | sz += notesize(notes + i); |
1700 | 1700 | ||
1701 | sz += thread_status_size; | 1701 | sz += thread_status_size; |
1702 | 1702 | ||
1703 | fill_elf_note_phdr(&phdr, sz, offset); | 1703 | fill_elf_note_phdr(&phdr, sz, offset); |
1704 | offset += sz; | 1704 | offset += sz; |
1705 | DUMP_WRITE(&phdr, sizeof(phdr)); | 1705 | DUMP_WRITE(&phdr, sizeof(phdr)); |
1706 | } | 1706 | } |
1707 | 1707 | ||
1708 | /* Page-align dumped data */ | 1708 | /* Page-align dumped data */ |
1709 | dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE); | 1709 | dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE); |
1710 | 1710 | ||
1711 | /* write program headers for segments dump */ | 1711 | /* write program headers for segments dump */ |
1712 | for ( | 1712 | for ( |
1713 | #ifdef CONFIG_MMU | 1713 | #ifdef CONFIG_MMU |
1714 | vma = current->mm->mmap; vma; vma = vma->vm_next | 1714 | vma = current->mm->mmap; vma; vma = vma->vm_next |
1715 | #else | 1715 | #else |
1716 | vml = current->mm->context.vmlist; vml; vml = vml->next | 1716 | vml = current->mm->context.vmlist; vml; vml = vml->next |
1717 | #endif | 1717 | #endif |
1718 | ) { | 1718 | ) { |
1719 | struct elf_phdr phdr; | 1719 | struct elf_phdr phdr; |
1720 | size_t sz; | 1720 | size_t sz; |
1721 | 1721 | ||
1722 | #ifndef CONFIG_MMU | 1722 | #ifndef CONFIG_MMU |
1723 | vma = vml->vma; | 1723 | vma = vml->vma; |
1724 | #endif | 1724 | #endif |
1725 | 1725 | ||
1726 | sz = vma->vm_end - vma->vm_start; | 1726 | sz = vma->vm_end - vma->vm_start; |
1727 | 1727 | ||
1728 | phdr.p_type = PT_LOAD; | 1728 | phdr.p_type = PT_LOAD; |
1729 | phdr.p_offset = offset; | 1729 | phdr.p_offset = offset; |
1730 | phdr.p_vaddr = vma->vm_start; | 1730 | phdr.p_vaddr = vma->vm_start; |
1731 | phdr.p_paddr = 0; | 1731 | phdr.p_paddr = 0; |
1732 | phdr.p_filesz = maydump(vma) ? sz : 0; | 1732 | phdr.p_filesz = maydump(vma) ? sz : 0; |
1733 | phdr.p_memsz = sz; | 1733 | phdr.p_memsz = sz; |
1734 | offset += phdr.p_filesz; | 1734 | offset += phdr.p_filesz; |
1735 | phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0; | 1735 | phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0; |
1736 | if (vma->vm_flags & VM_WRITE) | 1736 | if (vma->vm_flags & VM_WRITE) |
1737 | phdr.p_flags |= PF_W; | 1737 | phdr.p_flags |= PF_W; |
1738 | if (vma->vm_flags & VM_EXEC) | 1738 | if (vma->vm_flags & VM_EXEC) |
1739 | phdr.p_flags |= PF_X; | 1739 | phdr.p_flags |= PF_X; |
1740 | phdr.p_align = ELF_EXEC_PAGESIZE; | 1740 | phdr.p_align = ELF_EXEC_PAGESIZE; |
1741 | 1741 | ||
1742 | DUMP_WRITE(&phdr, sizeof(phdr)); | 1742 | DUMP_WRITE(&phdr, sizeof(phdr)); |
1743 | } | 1743 | } |
1744 | 1744 | ||
1745 | #ifdef ELF_CORE_WRITE_EXTRA_PHDRS | 1745 | #ifdef ELF_CORE_WRITE_EXTRA_PHDRS |
1746 | ELF_CORE_WRITE_EXTRA_PHDRS; | 1746 | ELF_CORE_WRITE_EXTRA_PHDRS; |
1747 | #endif | 1747 | #endif |
1748 | 1748 | ||
1749 | /* write out the notes section */ | 1749 | /* write out the notes section */ |
1750 | for (i = 0; i < numnote; i++) | 1750 | for (i = 0; i < numnote; i++) |
1751 | if (!writenote(notes + i, file)) | 1751 | if (!writenote(notes + i, file)) |
1752 | goto end_coredump; | 1752 | goto end_coredump; |
1753 | 1753 | ||
1754 | /* write out the thread status notes section */ | 1754 | /* write out the thread status notes section */ |
1755 | list_for_each(t, &thread_list) { | 1755 | list_for_each(t, &thread_list) { |
1756 | struct elf_thread_status *tmp = | 1756 | struct elf_thread_status *tmp = |
1757 | list_entry(t, struct elf_thread_status, list); | 1757 | list_entry(t, struct elf_thread_status, list); |
1758 | 1758 | ||
1759 | for (i = 0; i < tmp->num_notes; i++) | 1759 | for (i = 0; i < tmp->num_notes; i++) |
1760 | if (!writenote(&tmp->notes[i], file)) | 1760 | if (!writenote(&tmp->notes[i], file)) |
1761 | goto end_coredump; | 1761 | goto end_coredump; |
1762 | } | 1762 | } |
1763 | 1763 | ||
1764 | DUMP_SEEK(dataoff); | 1764 | DUMP_SEEK(dataoff); |
1765 | 1765 | ||
1766 | if (elf_fdpic_dump_segments(file, current->mm, &size, &limit) < 0) | 1766 | if (elf_fdpic_dump_segments(file, current->mm, &size, &limit) < 0) |
1767 | goto end_coredump; | 1767 | goto end_coredump; |
1768 | 1768 | ||
1769 | #ifdef ELF_CORE_WRITE_EXTRA_DATA | 1769 | #ifdef ELF_CORE_WRITE_EXTRA_DATA |
1770 | ELF_CORE_WRITE_EXTRA_DATA; | 1770 | ELF_CORE_WRITE_EXTRA_DATA; |
1771 | #endif | 1771 | #endif |
1772 | 1772 | ||
1773 | if (file->f_pos != offset) { | 1773 | if (file->f_pos != offset) { |
1774 | /* Sanity check */ | 1774 | /* Sanity check */ |
1775 | printk(KERN_WARNING | 1775 | printk(KERN_WARNING |
1776 | "elf_core_dump: file->f_pos (%lld) != offset (%lld)\n", | 1776 | "elf_core_dump: file->f_pos (%lld) != offset (%lld)\n", |
1777 | file->f_pos, offset); | 1777 | file->f_pos, offset); |
1778 | } | 1778 | } |
1779 | 1779 | ||
1780 | end_coredump: | 1780 | end_coredump: |
1781 | set_fs(fs); | 1781 | set_fs(fs); |
1782 | 1782 | ||
1783 | cleanup: | 1783 | cleanup: |
1784 | while (!list_empty(&thread_list)) { | 1784 | while (!list_empty(&thread_list)) { |
1785 | struct list_head *tmp = thread_list.next; | 1785 | struct list_head *tmp = thread_list.next; |
1786 | list_del(tmp); | 1786 | list_del(tmp); |
1787 | kfree(list_entry(tmp, struct elf_thread_status, list)); | 1787 | kfree(list_entry(tmp, struct elf_thread_status, list)); |
1788 | } | 1788 | } |
1789 | 1789 | ||
1790 | kfree(elf); | 1790 | kfree(elf); |
1791 | kfree(prstatus); | 1791 | kfree(prstatus); |
1792 | kfree(psinfo); | 1792 | kfree(psinfo); |
1793 | kfree(notes); | 1793 | kfree(notes); |
1794 | kfree(fpu); | 1794 | kfree(fpu); |
1795 | #ifdef ELF_CORE_COPY_XFPREGS | 1795 | #ifdef ELF_CORE_COPY_XFPREGS |
1796 | kfree(xfpu); | 1796 | kfree(xfpu); |
1797 | #endif | 1797 | #endif |
1798 | return has_dumped; | 1798 | return has_dumped; |
1799 | #undef NUM_NOTES | 1799 | #undef NUM_NOTES |
1800 | } | 1800 | } |
1801 | 1801 | ||
1802 | #endif /* USE_ELF_CORE_DUMP */ | 1802 | #endif /* USE_ELF_CORE_DUMP */ |