Commit 81ab6e7b26b453a795d46f2616ed0e31d97f05b9
Committed by
Linus Torvalds
1 parent
ae3cef7300
Exists in
smarc-l5.0.0_1.0.0-ga
and in
5 other branches
kmod: convert two call sites to call_usermodehelper_fns()
Both kernel/sys.c and security/keys/request_key.c were inlining the exact same code as call_usermodehelper_fns(); so simply convert these sites to directly use call_usermodehelper_fns(). Signed-off-by: Boaz Harrosh <bharrosh@panasas.com> Cc: Oleg Nesterov <oleg@redhat.com> Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp> Cc: Ingo Molnar <mingo@elte.hu> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 2 changed files with 11 additions and 21 deletions Inline Diff
kernel/sys.c
1 | /* | 1 | /* |
2 | * linux/kernel/sys.c | 2 | * linux/kernel/sys.c |
3 | * | 3 | * |
4 | * Copyright (C) 1991, 1992 Linus Torvalds | 4 | * Copyright (C) 1991, 1992 Linus Torvalds |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/export.h> | 7 | #include <linux/export.h> |
8 | #include <linux/mm.h> | 8 | #include <linux/mm.h> |
9 | #include <linux/utsname.h> | 9 | #include <linux/utsname.h> |
10 | #include <linux/mman.h> | 10 | #include <linux/mman.h> |
11 | #include <linux/reboot.h> | 11 | #include <linux/reboot.h> |
12 | #include <linux/prctl.h> | 12 | #include <linux/prctl.h> |
13 | #include <linux/highuid.h> | 13 | #include <linux/highuid.h> |
14 | #include <linux/fs.h> | 14 | #include <linux/fs.h> |
15 | #include <linux/kmod.h> | 15 | #include <linux/kmod.h> |
16 | #include <linux/perf_event.h> | 16 | #include <linux/perf_event.h> |
17 | #include <linux/resource.h> | 17 | #include <linux/resource.h> |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/kexec.h> | 19 | #include <linux/kexec.h> |
20 | #include <linux/workqueue.h> | 20 | #include <linux/workqueue.h> |
21 | #include <linux/capability.h> | 21 | #include <linux/capability.h> |
22 | #include <linux/device.h> | 22 | #include <linux/device.h> |
23 | #include <linux/key.h> | 23 | #include <linux/key.h> |
24 | #include <linux/times.h> | 24 | #include <linux/times.h> |
25 | #include <linux/posix-timers.h> | 25 | #include <linux/posix-timers.h> |
26 | #include <linux/security.h> | 26 | #include <linux/security.h> |
27 | #include <linux/dcookies.h> | 27 | #include <linux/dcookies.h> |
28 | #include <linux/suspend.h> | 28 | #include <linux/suspend.h> |
29 | #include <linux/tty.h> | 29 | #include <linux/tty.h> |
30 | #include <linux/signal.h> | 30 | #include <linux/signal.h> |
31 | #include <linux/cn_proc.h> | 31 | #include <linux/cn_proc.h> |
32 | #include <linux/getcpu.h> | 32 | #include <linux/getcpu.h> |
33 | #include <linux/task_io_accounting_ops.h> | 33 | #include <linux/task_io_accounting_ops.h> |
34 | #include <linux/seccomp.h> | 34 | #include <linux/seccomp.h> |
35 | #include <linux/cpu.h> | 35 | #include <linux/cpu.h> |
36 | #include <linux/personality.h> | 36 | #include <linux/personality.h> |
37 | #include <linux/ptrace.h> | 37 | #include <linux/ptrace.h> |
38 | #include <linux/fs_struct.h> | 38 | #include <linux/fs_struct.h> |
39 | #include <linux/gfp.h> | 39 | #include <linux/gfp.h> |
40 | #include <linux/syscore_ops.h> | 40 | #include <linux/syscore_ops.h> |
41 | #include <linux/version.h> | 41 | #include <linux/version.h> |
42 | #include <linux/ctype.h> | 42 | #include <linux/ctype.h> |
43 | 43 | ||
44 | #include <linux/compat.h> | 44 | #include <linux/compat.h> |
45 | #include <linux/syscalls.h> | 45 | #include <linux/syscalls.h> |
46 | #include <linux/kprobes.h> | 46 | #include <linux/kprobes.h> |
47 | #include <linux/user_namespace.h> | 47 | #include <linux/user_namespace.h> |
48 | 48 | ||
49 | #include <linux/kmsg_dump.h> | 49 | #include <linux/kmsg_dump.h> |
50 | /* Move somewhere else to avoid recompiling? */ | 50 | /* Move somewhere else to avoid recompiling? */ |
51 | #include <generated/utsrelease.h> | 51 | #include <generated/utsrelease.h> |
52 | 52 | ||
53 | #include <asm/uaccess.h> | 53 | #include <asm/uaccess.h> |
54 | #include <asm/io.h> | 54 | #include <asm/io.h> |
55 | #include <asm/unistd.h> | 55 | #include <asm/unistd.h> |
56 | 56 | ||
57 | #ifndef SET_UNALIGN_CTL | 57 | #ifndef SET_UNALIGN_CTL |
58 | # define SET_UNALIGN_CTL(a,b) (-EINVAL) | 58 | # define SET_UNALIGN_CTL(a,b) (-EINVAL) |
59 | #endif | 59 | #endif |
60 | #ifndef GET_UNALIGN_CTL | 60 | #ifndef GET_UNALIGN_CTL |
61 | # define GET_UNALIGN_CTL(a,b) (-EINVAL) | 61 | # define GET_UNALIGN_CTL(a,b) (-EINVAL) |
62 | #endif | 62 | #endif |
63 | #ifndef SET_FPEMU_CTL | 63 | #ifndef SET_FPEMU_CTL |
64 | # define SET_FPEMU_CTL(a,b) (-EINVAL) | 64 | # define SET_FPEMU_CTL(a,b) (-EINVAL) |
65 | #endif | 65 | #endif |
66 | #ifndef GET_FPEMU_CTL | 66 | #ifndef GET_FPEMU_CTL |
67 | # define GET_FPEMU_CTL(a,b) (-EINVAL) | 67 | # define GET_FPEMU_CTL(a,b) (-EINVAL) |
68 | #endif | 68 | #endif |
69 | #ifndef SET_FPEXC_CTL | 69 | #ifndef SET_FPEXC_CTL |
70 | # define SET_FPEXC_CTL(a,b) (-EINVAL) | 70 | # define SET_FPEXC_CTL(a,b) (-EINVAL) |
71 | #endif | 71 | #endif |
72 | #ifndef GET_FPEXC_CTL | 72 | #ifndef GET_FPEXC_CTL |
73 | # define GET_FPEXC_CTL(a,b) (-EINVAL) | 73 | # define GET_FPEXC_CTL(a,b) (-EINVAL) |
74 | #endif | 74 | #endif |
75 | #ifndef GET_ENDIAN | 75 | #ifndef GET_ENDIAN |
76 | # define GET_ENDIAN(a,b) (-EINVAL) | 76 | # define GET_ENDIAN(a,b) (-EINVAL) |
77 | #endif | 77 | #endif |
78 | #ifndef SET_ENDIAN | 78 | #ifndef SET_ENDIAN |
79 | # define SET_ENDIAN(a,b) (-EINVAL) | 79 | # define SET_ENDIAN(a,b) (-EINVAL) |
80 | #endif | 80 | #endif |
81 | #ifndef GET_TSC_CTL | 81 | #ifndef GET_TSC_CTL |
82 | # define GET_TSC_CTL(a) (-EINVAL) | 82 | # define GET_TSC_CTL(a) (-EINVAL) |
83 | #endif | 83 | #endif |
84 | #ifndef SET_TSC_CTL | 84 | #ifndef SET_TSC_CTL |
85 | # define SET_TSC_CTL(a) (-EINVAL) | 85 | # define SET_TSC_CTL(a) (-EINVAL) |
86 | #endif | 86 | #endif |
87 | 87 | ||
88 | /* | 88 | /* |
89 | * this is where the system-wide overflow UID and GID are defined, for | 89 | * this is where the system-wide overflow UID and GID are defined, for |
90 | * architectures that now have 32-bit UID/GID but didn't in the past | 90 | * architectures that now have 32-bit UID/GID but didn't in the past |
91 | */ | 91 | */ |
92 | 92 | ||
93 | int overflowuid = DEFAULT_OVERFLOWUID; | 93 | int overflowuid = DEFAULT_OVERFLOWUID; |
94 | int overflowgid = DEFAULT_OVERFLOWGID; | 94 | int overflowgid = DEFAULT_OVERFLOWGID; |
95 | 95 | ||
96 | EXPORT_SYMBOL(overflowuid); | 96 | EXPORT_SYMBOL(overflowuid); |
97 | EXPORT_SYMBOL(overflowgid); | 97 | EXPORT_SYMBOL(overflowgid); |
98 | 98 | ||
99 | /* | 99 | /* |
100 | * the same as above, but for filesystems which can only store a 16-bit | 100 | * the same as above, but for filesystems which can only store a 16-bit |
101 | * UID and GID. as such, this is needed on all architectures | 101 | * UID and GID. as such, this is needed on all architectures |
102 | */ | 102 | */ |
103 | 103 | ||
104 | int fs_overflowuid = DEFAULT_FS_OVERFLOWUID; | 104 | int fs_overflowuid = DEFAULT_FS_OVERFLOWUID; |
105 | int fs_overflowgid = DEFAULT_FS_OVERFLOWUID; | 105 | int fs_overflowgid = DEFAULT_FS_OVERFLOWUID; |
106 | 106 | ||
107 | EXPORT_SYMBOL(fs_overflowuid); | 107 | EXPORT_SYMBOL(fs_overflowuid); |
108 | EXPORT_SYMBOL(fs_overflowgid); | 108 | EXPORT_SYMBOL(fs_overflowgid); |
109 | 109 | ||
110 | /* | 110 | /* |
111 | * this indicates whether you can reboot with ctrl-alt-del: the default is yes | 111 | * this indicates whether you can reboot with ctrl-alt-del: the default is yes |
112 | */ | 112 | */ |
113 | 113 | ||
114 | int C_A_D = 1; | 114 | int C_A_D = 1; |
115 | struct pid *cad_pid; | 115 | struct pid *cad_pid; |
116 | EXPORT_SYMBOL(cad_pid); | 116 | EXPORT_SYMBOL(cad_pid); |
117 | 117 | ||
118 | /* | 118 | /* |
119 | * If set, this is used for preparing the system to power off. | 119 | * If set, this is used for preparing the system to power off. |
120 | */ | 120 | */ |
121 | 121 | ||
122 | void (*pm_power_off_prepare)(void); | 122 | void (*pm_power_off_prepare)(void); |
123 | 123 | ||
124 | /* | 124 | /* |
125 | * Returns true if current's euid is same as p's uid or euid, | 125 | * Returns true if current's euid is same as p's uid or euid, |
126 | * or has CAP_SYS_NICE to p's user_ns. | 126 | * or has CAP_SYS_NICE to p's user_ns. |
127 | * | 127 | * |
128 | * Called with rcu_read_lock, creds are safe | 128 | * Called with rcu_read_lock, creds are safe |
129 | */ | 129 | */ |
130 | static bool set_one_prio_perm(struct task_struct *p) | 130 | static bool set_one_prio_perm(struct task_struct *p) |
131 | { | 131 | { |
132 | const struct cred *cred = current_cred(), *pcred = __task_cred(p); | 132 | const struct cred *cred = current_cred(), *pcred = __task_cred(p); |
133 | 133 | ||
134 | if (uid_eq(pcred->uid, cred->euid) || | 134 | if (uid_eq(pcred->uid, cred->euid) || |
135 | uid_eq(pcred->euid, cred->euid)) | 135 | uid_eq(pcred->euid, cred->euid)) |
136 | return true; | 136 | return true; |
137 | if (ns_capable(pcred->user_ns, CAP_SYS_NICE)) | 137 | if (ns_capable(pcred->user_ns, CAP_SYS_NICE)) |
138 | return true; | 138 | return true; |
139 | return false; | 139 | return false; |
140 | } | 140 | } |
141 | 141 | ||
142 | /* | 142 | /* |
143 | * set the priority of a task | 143 | * set the priority of a task |
144 | * - the caller must hold the RCU read lock | 144 | * - the caller must hold the RCU read lock |
145 | */ | 145 | */ |
146 | static int set_one_prio(struct task_struct *p, int niceval, int error) | 146 | static int set_one_prio(struct task_struct *p, int niceval, int error) |
147 | { | 147 | { |
148 | int no_nice; | 148 | int no_nice; |
149 | 149 | ||
150 | if (!set_one_prio_perm(p)) { | 150 | if (!set_one_prio_perm(p)) { |
151 | error = -EPERM; | 151 | error = -EPERM; |
152 | goto out; | 152 | goto out; |
153 | } | 153 | } |
154 | if (niceval < task_nice(p) && !can_nice(p, niceval)) { | 154 | if (niceval < task_nice(p) && !can_nice(p, niceval)) { |
155 | error = -EACCES; | 155 | error = -EACCES; |
156 | goto out; | 156 | goto out; |
157 | } | 157 | } |
158 | no_nice = security_task_setnice(p, niceval); | 158 | no_nice = security_task_setnice(p, niceval); |
159 | if (no_nice) { | 159 | if (no_nice) { |
160 | error = no_nice; | 160 | error = no_nice; |
161 | goto out; | 161 | goto out; |
162 | } | 162 | } |
163 | if (error == -ESRCH) | 163 | if (error == -ESRCH) |
164 | error = 0; | 164 | error = 0; |
165 | set_user_nice(p, niceval); | 165 | set_user_nice(p, niceval); |
166 | out: | 166 | out: |
167 | return error; | 167 | return error; |
168 | } | 168 | } |
169 | 169 | ||
170 | SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval) | 170 | SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval) |
171 | { | 171 | { |
172 | struct task_struct *g, *p; | 172 | struct task_struct *g, *p; |
173 | struct user_struct *user; | 173 | struct user_struct *user; |
174 | const struct cred *cred = current_cred(); | 174 | const struct cred *cred = current_cred(); |
175 | int error = -EINVAL; | 175 | int error = -EINVAL; |
176 | struct pid *pgrp; | 176 | struct pid *pgrp; |
177 | kuid_t uid; | 177 | kuid_t uid; |
178 | 178 | ||
179 | if (which > PRIO_USER || which < PRIO_PROCESS) | 179 | if (which > PRIO_USER || which < PRIO_PROCESS) |
180 | goto out; | 180 | goto out; |
181 | 181 | ||
182 | /* normalize: avoid signed division (rounding problems) */ | 182 | /* normalize: avoid signed division (rounding problems) */ |
183 | error = -ESRCH; | 183 | error = -ESRCH; |
184 | if (niceval < -20) | 184 | if (niceval < -20) |
185 | niceval = -20; | 185 | niceval = -20; |
186 | if (niceval > 19) | 186 | if (niceval > 19) |
187 | niceval = 19; | 187 | niceval = 19; |
188 | 188 | ||
189 | rcu_read_lock(); | 189 | rcu_read_lock(); |
190 | read_lock(&tasklist_lock); | 190 | read_lock(&tasklist_lock); |
191 | switch (which) { | 191 | switch (which) { |
192 | case PRIO_PROCESS: | 192 | case PRIO_PROCESS: |
193 | if (who) | 193 | if (who) |
194 | p = find_task_by_vpid(who); | 194 | p = find_task_by_vpid(who); |
195 | else | 195 | else |
196 | p = current; | 196 | p = current; |
197 | if (p) | 197 | if (p) |
198 | error = set_one_prio(p, niceval, error); | 198 | error = set_one_prio(p, niceval, error); |
199 | break; | 199 | break; |
200 | case PRIO_PGRP: | 200 | case PRIO_PGRP: |
201 | if (who) | 201 | if (who) |
202 | pgrp = find_vpid(who); | 202 | pgrp = find_vpid(who); |
203 | else | 203 | else |
204 | pgrp = task_pgrp(current); | 204 | pgrp = task_pgrp(current); |
205 | do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { | 205 | do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { |
206 | error = set_one_prio(p, niceval, error); | 206 | error = set_one_prio(p, niceval, error); |
207 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); | 207 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); |
208 | break; | 208 | break; |
209 | case PRIO_USER: | 209 | case PRIO_USER: |
210 | uid = make_kuid(cred->user_ns, who); | 210 | uid = make_kuid(cred->user_ns, who); |
211 | user = cred->user; | 211 | user = cred->user; |
212 | if (!who) | 212 | if (!who) |
213 | uid = cred->uid; | 213 | uid = cred->uid; |
214 | else if (!uid_eq(uid, cred->uid) && | 214 | else if (!uid_eq(uid, cred->uid) && |
215 | !(user = find_user(uid))) | 215 | !(user = find_user(uid))) |
216 | goto out_unlock; /* No processes for this user */ | 216 | goto out_unlock; /* No processes for this user */ |
217 | 217 | ||
218 | do_each_thread(g, p) { | 218 | do_each_thread(g, p) { |
219 | if (uid_eq(task_uid(p), uid)) | 219 | if (uid_eq(task_uid(p), uid)) |
220 | error = set_one_prio(p, niceval, error); | 220 | error = set_one_prio(p, niceval, error); |
221 | } while_each_thread(g, p); | 221 | } while_each_thread(g, p); |
222 | if (!uid_eq(uid, cred->uid)) | 222 | if (!uid_eq(uid, cred->uid)) |
223 | free_uid(user); /* For find_user() */ | 223 | free_uid(user); /* For find_user() */ |
224 | break; | 224 | break; |
225 | } | 225 | } |
226 | out_unlock: | 226 | out_unlock: |
227 | read_unlock(&tasklist_lock); | 227 | read_unlock(&tasklist_lock); |
228 | rcu_read_unlock(); | 228 | rcu_read_unlock(); |
229 | out: | 229 | out: |
230 | return error; | 230 | return error; |
231 | } | 231 | } |
232 | 232 | ||
233 | /* | 233 | /* |
234 | * Ugh. To avoid negative return values, "getpriority()" will | 234 | * Ugh. To avoid negative return values, "getpriority()" will |
235 | * not return the normal nice-value, but a negated value that | 235 | * not return the normal nice-value, but a negated value that |
236 | * has been offset by 20 (ie it returns 40..1 instead of -20..19) | 236 | * has been offset by 20 (ie it returns 40..1 instead of -20..19) |
237 | * to stay compatible. | 237 | * to stay compatible. |
238 | */ | 238 | */ |
239 | SYSCALL_DEFINE2(getpriority, int, which, int, who) | 239 | SYSCALL_DEFINE2(getpriority, int, which, int, who) |
240 | { | 240 | { |
241 | struct task_struct *g, *p; | 241 | struct task_struct *g, *p; |
242 | struct user_struct *user; | 242 | struct user_struct *user; |
243 | const struct cred *cred = current_cred(); | 243 | const struct cred *cred = current_cred(); |
244 | long niceval, retval = -ESRCH; | 244 | long niceval, retval = -ESRCH; |
245 | struct pid *pgrp; | 245 | struct pid *pgrp; |
246 | kuid_t uid; | 246 | kuid_t uid; |
247 | 247 | ||
248 | if (which > PRIO_USER || which < PRIO_PROCESS) | 248 | if (which > PRIO_USER || which < PRIO_PROCESS) |
249 | return -EINVAL; | 249 | return -EINVAL; |
250 | 250 | ||
251 | rcu_read_lock(); | 251 | rcu_read_lock(); |
252 | read_lock(&tasklist_lock); | 252 | read_lock(&tasklist_lock); |
253 | switch (which) { | 253 | switch (which) { |
254 | case PRIO_PROCESS: | 254 | case PRIO_PROCESS: |
255 | if (who) | 255 | if (who) |
256 | p = find_task_by_vpid(who); | 256 | p = find_task_by_vpid(who); |
257 | else | 257 | else |
258 | p = current; | 258 | p = current; |
259 | if (p) { | 259 | if (p) { |
260 | niceval = 20 - task_nice(p); | 260 | niceval = 20 - task_nice(p); |
261 | if (niceval > retval) | 261 | if (niceval > retval) |
262 | retval = niceval; | 262 | retval = niceval; |
263 | } | 263 | } |
264 | break; | 264 | break; |
265 | case PRIO_PGRP: | 265 | case PRIO_PGRP: |
266 | if (who) | 266 | if (who) |
267 | pgrp = find_vpid(who); | 267 | pgrp = find_vpid(who); |
268 | else | 268 | else |
269 | pgrp = task_pgrp(current); | 269 | pgrp = task_pgrp(current); |
270 | do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { | 270 | do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { |
271 | niceval = 20 - task_nice(p); | 271 | niceval = 20 - task_nice(p); |
272 | if (niceval > retval) | 272 | if (niceval > retval) |
273 | retval = niceval; | 273 | retval = niceval; |
274 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); | 274 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); |
275 | break; | 275 | break; |
276 | case PRIO_USER: | 276 | case PRIO_USER: |
277 | uid = make_kuid(cred->user_ns, who); | 277 | uid = make_kuid(cred->user_ns, who); |
278 | user = cred->user; | 278 | user = cred->user; |
279 | if (!who) | 279 | if (!who) |
280 | uid = cred->uid; | 280 | uid = cred->uid; |
281 | else if (!uid_eq(uid, cred->uid) && | 281 | else if (!uid_eq(uid, cred->uid) && |
282 | !(user = find_user(uid))) | 282 | !(user = find_user(uid))) |
283 | goto out_unlock; /* No processes for this user */ | 283 | goto out_unlock; /* No processes for this user */ |
284 | 284 | ||
285 | do_each_thread(g, p) { | 285 | do_each_thread(g, p) { |
286 | if (uid_eq(task_uid(p), uid)) { | 286 | if (uid_eq(task_uid(p), uid)) { |
287 | niceval = 20 - task_nice(p); | 287 | niceval = 20 - task_nice(p); |
288 | if (niceval > retval) | 288 | if (niceval > retval) |
289 | retval = niceval; | 289 | retval = niceval; |
290 | } | 290 | } |
291 | } while_each_thread(g, p); | 291 | } while_each_thread(g, p); |
292 | if (!uid_eq(uid, cred->uid)) | 292 | if (!uid_eq(uid, cred->uid)) |
293 | free_uid(user); /* for find_user() */ | 293 | free_uid(user); /* for find_user() */ |
294 | break; | 294 | break; |
295 | } | 295 | } |
296 | out_unlock: | 296 | out_unlock: |
297 | read_unlock(&tasklist_lock); | 297 | read_unlock(&tasklist_lock); |
298 | rcu_read_unlock(); | 298 | rcu_read_unlock(); |
299 | 299 | ||
300 | return retval; | 300 | return retval; |
301 | } | 301 | } |
302 | 302 | ||
303 | /** | 303 | /** |
304 | * emergency_restart - reboot the system | 304 | * emergency_restart - reboot the system |
305 | * | 305 | * |
306 | * Without shutting down any hardware or taking any locks | 306 | * Without shutting down any hardware or taking any locks |
307 | * reboot the system. This is called when we know we are in | 307 | * reboot the system. This is called when we know we are in |
308 | * trouble so this is our best effort to reboot. This is | 308 | * trouble so this is our best effort to reboot. This is |
309 | * safe to call in interrupt context. | 309 | * safe to call in interrupt context. |
310 | */ | 310 | */ |
311 | void emergency_restart(void) | 311 | void emergency_restart(void) |
312 | { | 312 | { |
313 | kmsg_dump(KMSG_DUMP_EMERG); | 313 | kmsg_dump(KMSG_DUMP_EMERG); |
314 | machine_emergency_restart(); | 314 | machine_emergency_restart(); |
315 | } | 315 | } |
316 | EXPORT_SYMBOL_GPL(emergency_restart); | 316 | EXPORT_SYMBOL_GPL(emergency_restart); |
317 | 317 | ||
318 | void kernel_restart_prepare(char *cmd) | 318 | void kernel_restart_prepare(char *cmd) |
319 | { | 319 | { |
320 | blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); | 320 | blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); |
321 | system_state = SYSTEM_RESTART; | 321 | system_state = SYSTEM_RESTART; |
322 | usermodehelper_disable(); | 322 | usermodehelper_disable(); |
323 | device_shutdown(); | 323 | device_shutdown(); |
324 | syscore_shutdown(); | 324 | syscore_shutdown(); |
325 | } | 325 | } |
326 | 326 | ||
327 | /** | 327 | /** |
328 | * register_reboot_notifier - Register function to be called at reboot time | 328 | * register_reboot_notifier - Register function to be called at reboot time |
329 | * @nb: Info about notifier function to be called | 329 | * @nb: Info about notifier function to be called |
330 | * | 330 | * |
331 | * Registers a function with the list of functions | 331 | * Registers a function with the list of functions |
332 | * to be called at reboot time. | 332 | * to be called at reboot time. |
333 | * | 333 | * |
334 | * Currently always returns zero, as blocking_notifier_chain_register() | 334 | * Currently always returns zero, as blocking_notifier_chain_register() |
335 | * always returns zero. | 335 | * always returns zero. |
336 | */ | 336 | */ |
337 | int register_reboot_notifier(struct notifier_block *nb) | 337 | int register_reboot_notifier(struct notifier_block *nb) |
338 | { | 338 | { |
339 | return blocking_notifier_chain_register(&reboot_notifier_list, nb); | 339 | return blocking_notifier_chain_register(&reboot_notifier_list, nb); |
340 | } | 340 | } |
341 | EXPORT_SYMBOL(register_reboot_notifier); | 341 | EXPORT_SYMBOL(register_reboot_notifier); |
342 | 342 | ||
343 | /** | 343 | /** |
344 | * unregister_reboot_notifier - Unregister previously registered reboot notifier | 344 | * unregister_reboot_notifier - Unregister previously registered reboot notifier |
345 | * @nb: Hook to be unregistered | 345 | * @nb: Hook to be unregistered |
346 | * | 346 | * |
347 | * Unregisters a previously registered reboot | 347 | * Unregisters a previously registered reboot |
348 | * notifier function. | 348 | * notifier function. |
349 | * | 349 | * |
350 | * Returns zero on success, or %-ENOENT on failure. | 350 | * Returns zero on success, or %-ENOENT on failure. |
351 | */ | 351 | */ |
352 | int unregister_reboot_notifier(struct notifier_block *nb) | 352 | int unregister_reboot_notifier(struct notifier_block *nb) |
353 | { | 353 | { |
354 | return blocking_notifier_chain_unregister(&reboot_notifier_list, nb); | 354 | return blocking_notifier_chain_unregister(&reboot_notifier_list, nb); |
355 | } | 355 | } |
356 | EXPORT_SYMBOL(unregister_reboot_notifier); | 356 | EXPORT_SYMBOL(unregister_reboot_notifier); |
357 | 357 | ||
358 | /** | 358 | /** |
359 | * kernel_restart - reboot the system | 359 | * kernel_restart - reboot the system |
360 | * @cmd: pointer to buffer containing command to execute for restart | 360 | * @cmd: pointer to buffer containing command to execute for restart |
361 | * or %NULL | 361 | * or %NULL |
362 | * | 362 | * |
363 | * Shutdown everything and perform a clean reboot. | 363 | * Shutdown everything and perform a clean reboot. |
364 | * This is not safe to call in interrupt context. | 364 | * This is not safe to call in interrupt context. |
365 | */ | 365 | */ |
366 | void kernel_restart(char *cmd) | 366 | void kernel_restart(char *cmd) |
367 | { | 367 | { |
368 | kernel_restart_prepare(cmd); | 368 | kernel_restart_prepare(cmd); |
369 | if (!cmd) | 369 | if (!cmd) |
370 | printk(KERN_EMERG "Restarting system.\n"); | 370 | printk(KERN_EMERG "Restarting system.\n"); |
371 | else | 371 | else |
372 | printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd); | 372 | printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd); |
373 | kmsg_dump(KMSG_DUMP_RESTART); | 373 | kmsg_dump(KMSG_DUMP_RESTART); |
374 | machine_restart(cmd); | 374 | machine_restart(cmd); |
375 | } | 375 | } |
376 | EXPORT_SYMBOL_GPL(kernel_restart); | 376 | EXPORT_SYMBOL_GPL(kernel_restart); |
377 | 377 | ||
378 | static void kernel_shutdown_prepare(enum system_states state) | 378 | static void kernel_shutdown_prepare(enum system_states state) |
379 | { | 379 | { |
380 | blocking_notifier_call_chain(&reboot_notifier_list, | 380 | blocking_notifier_call_chain(&reboot_notifier_list, |
381 | (state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL); | 381 | (state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL); |
382 | system_state = state; | 382 | system_state = state; |
383 | usermodehelper_disable(); | 383 | usermodehelper_disable(); |
384 | device_shutdown(); | 384 | device_shutdown(); |
385 | } | 385 | } |
386 | /** | 386 | /** |
387 | * kernel_halt - halt the system | 387 | * kernel_halt - halt the system |
388 | * | 388 | * |
389 | * Shutdown everything and perform a clean system halt. | 389 | * Shutdown everything and perform a clean system halt. |
390 | */ | 390 | */ |
391 | void kernel_halt(void) | 391 | void kernel_halt(void) |
392 | { | 392 | { |
393 | kernel_shutdown_prepare(SYSTEM_HALT); | 393 | kernel_shutdown_prepare(SYSTEM_HALT); |
394 | syscore_shutdown(); | 394 | syscore_shutdown(); |
395 | printk(KERN_EMERG "System halted.\n"); | 395 | printk(KERN_EMERG "System halted.\n"); |
396 | kmsg_dump(KMSG_DUMP_HALT); | 396 | kmsg_dump(KMSG_DUMP_HALT); |
397 | machine_halt(); | 397 | machine_halt(); |
398 | } | 398 | } |
399 | 399 | ||
400 | EXPORT_SYMBOL_GPL(kernel_halt); | 400 | EXPORT_SYMBOL_GPL(kernel_halt); |
401 | 401 | ||
402 | /** | 402 | /** |
403 | * kernel_power_off - power_off the system | 403 | * kernel_power_off - power_off the system |
404 | * | 404 | * |
405 | * Shutdown everything and perform a clean system power_off. | 405 | * Shutdown everything and perform a clean system power_off. |
406 | */ | 406 | */ |
407 | void kernel_power_off(void) | 407 | void kernel_power_off(void) |
408 | { | 408 | { |
409 | kernel_shutdown_prepare(SYSTEM_POWER_OFF); | 409 | kernel_shutdown_prepare(SYSTEM_POWER_OFF); |
410 | if (pm_power_off_prepare) | 410 | if (pm_power_off_prepare) |
411 | pm_power_off_prepare(); | 411 | pm_power_off_prepare(); |
412 | disable_nonboot_cpus(); | 412 | disable_nonboot_cpus(); |
413 | syscore_shutdown(); | 413 | syscore_shutdown(); |
414 | printk(KERN_EMERG "Power down.\n"); | 414 | printk(KERN_EMERG "Power down.\n"); |
415 | kmsg_dump(KMSG_DUMP_POWEROFF); | 415 | kmsg_dump(KMSG_DUMP_POWEROFF); |
416 | machine_power_off(); | 416 | machine_power_off(); |
417 | } | 417 | } |
418 | EXPORT_SYMBOL_GPL(kernel_power_off); | 418 | EXPORT_SYMBOL_GPL(kernel_power_off); |
419 | 419 | ||
420 | static DEFINE_MUTEX(reboot_mutex); | 420 | static DEFINE_MUTEX(reboot_mutex); |
421 | 421 | ||
422 | /* | 422 | /* |
423 | * Reboot system call: for obvious reasons only root may call it, | 423 | * Reboot system call: for obvious reasons only root may call it, |
424 | * and even root needs to set up some magic numbers in the registers | 424 | * and even root needs to set up some magic numbers in the registers |
425 | * so that some mistake won't make this reboot the whole machine. | 425 | * so that some mistake won't make this reboot the whole machine. |
426 | * You can also set the meaning of the ctrl-alt-del-key here. | 426 | * You can also set the meaning of the ctrl-alt-del-key here. |
427 | * | 427 | * |
428 | * reboot doesn't sync: do that yourself before calling this. | 428 | * reboot doesn't sync: do that yourself before calling this. |
429 | */ | 429 | */ |
430 | SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, | 430 | SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, |
431 | void __user *, arg) | 431 | void __user *, arg) |
432 | { | 432 | { |
433 | char buffer[256]; | 433 | char buffer[256]; |
434 | int ret = 0; | 434 | int ret = 0; |
435 | 435 | ||
436 | /* We only trust the superuser with rebooting the system. */ | 436 | /* We only trust the superuser with rebooting the system. */ |
437 | if (!capable(CAP_SYS_BOOT)) | 437 | if (!capable(CAP_SYS_BOOT)) |
438 | return -EPERM; | 438 | return -EPERM; |
439 | 439 | ||
440 | /* For safety, we require "magic" arguments. */ | 440 | /* For safety, we require "magic" arguments. */ |
441 | if (magic1 != LINUX_REBOOT_MAGIC1 || | 441 | if (magic1 != LINUX_REBOOT_MAGIC1 || |
442 | (magic2 != LINUX_REBOOT_MAGIC2 && | 442 | (magic2 != LINUX_REBOOT_MAGIC2 && |
443 | magic2 != LINUX_REBOOT_MAGIC2A && | 443 | magic2 != LINUX_REBOOT_MAGIC2A && |
444 | magic2 != LINUX_REBOOT_MAGIC2B && | 444 | magic2 != LINUX_REBOOT_MAGIC2B && |
445 | magic2 != LINUX_REBOOT_MAGIC2C)) | 445 | magic2 != LINUX_REBOOT_MAGIC2C)) |
446 | return -EINVAL; | 446 | return -EINVAL; |
447 | 447 | ||
448 | /* | 448 | /* |
449 | * If pid namespaces are enabled and the current task is in a child | 449 | * If pid namespaces are enabled and the current task is in a child |
450 | * pid_namespace, the command is handled by reboot_pid_ns() which will | 450 | * pid_namespace, the command is handled by reboot_pid_ns() which will |
451 | * call do_exit(). | 451 | * call do_exit(). |
452 | */ | 452 | */ |
453 | ret = reboot_pid_ns(task_active_pid_ns(current), cmd); | 453 | ret = reboot_pid_ns(task_active_pid_ns(current), cmd); |
454 | if (ret) | 454 | if (ret) |
455 | return ret; | 455 | return ret; |
456 | 456 | ||
457 | /* Instead of trying to make the power_off code look like | 457 | /* Instead of trying to make the power_off code look like |
458 | * halt when pm_power_off is not set do it the easy way. | 458 | * halt when pm_power_off is not set do it the easy way. |
459 | */ | 459 | */ |
460 | if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off) | 460 | if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off) |
461 | cmd = LINUX_REBOOT_CMD_HALT; | 461 | cmd = LINUX_REBOOT_CMD_HALT; |
462 | 462 | ||
463 | mutex_lock(&reboot_mutex); | 463 | mutex_lock(&reboot_mutex); |
464 | switch (cmd) { | 464 | switch (cmd) { |
465 | case LINUX_REBOOT_CMD_RESTART: | 465 | case LINUX_REBOOT_CMD_RESTART: |
466 | kernel_restart(NULL); | 466 | kernel_restart(NULL); |
467 | break; | 467 | break; |
468 | 468 | ||
469 | case LINUX_REBOOT_CMD_CAD_ON: | 469 | case LINUX_REBOOT_CMD_CAD_ON: |
470 | C_A_D = 1; | 470 | C_A_D = 1; |
471 | break; | 471 | break; |
472 | 472 | ||
473 | case LINUX_REBOOT_CMD_CAD_OFF: | 473 | case LINUX_REBOOT_CMD_CAD_OFF: |
474 | C_A_D = 0; | 474 | C_A_D = 0; |
475 | break; | 475 | break; |
476 | 476 | ||
477 | case LINUX_REBOOT_CMD_HALT: | 477 | case LINUX_REBOOT_CMD_HALT: |
478 | kernel_halt(); | 478 | kernel_halt(); |
479 | do_exit(0); | 479 | do_exit(0); |
480 | panic("cannot halt"); | 480 | panic("cannot halt"); |
481 | 481 | ||
482 | case LINUX_REBOOT_CMD_POWER_OFF: | 482 | case LINUX_REBOOT_CMD_POWER_OFF: |
483 | kernel_power_off(); | 483 | kernel_power_off(); |
484 | do_exit(0); | 484 | do_exit(0); |
485 | break; | 485 | break; |
486 | 486 | ||
487 | case LINUX_REBOOT_CMD_RESTART2: | 487 | case LINUX_REBOOT_CMD_RESTART2: |
488 | if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) { | 488 | if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) { |
489 | ret = -EFAULT; | 489 | ret = -EFAULT; |
490 | break; | 490 | break; |
491 | } | 491 | } |
492 | buffer[sizeof(buffer) - 1] = '\0'; | 492 | buffer[sizeof(buffer) - 1] = '\0'; |
493 | 493 | ||
494 | kernel_restart(buffer); | 494 | kernel_restart(buffer); |
495 | break; | 495 | break; |
496 | 496 | ||
497 | #ifdef CONFIG_KEXEC | 497 | #ifdef CONFIG_KEXEC |
498 | case LINUX_REBOOT_CMD_KEXEC: | 498 | case LINUX_REBOOT_CMD_KEXEC: |
499 | ret = kernel_kexec(); | 499 | ret = kernel_kexec(); |
500 | break; | 500 | break; |
501 | #endif | 501 | #endif |
502 | 502 | ||
503 | #ifdef CONFIG_HIBERNATION | 503 | #ifdef CONFIG_HIBERNATION |
504 | case LINUX_REBOOT_CMD_SW_SUSPEND: | 504 | case LINUX_REBOOT_CMD_SW_SUSPEND: |
505 | ret = hibernate(); | 505 | ret = hibernate(); |
506 | break; | 506 | break; |
507 | #endif | 507 | #endif |
508 | 508 | ||
509 | default: | 509 | default: |
510 | ret = -EINVAL; | 510 | ret = -EINVAL; |
511 | break; | 511 | break; |
512 | } | 512 | } |
513 | mutex_unlock(&reboot_mutex); | 513 | mutex_unlock(&reboot_mutex); |
514 | return ret; | 514 | return ret; |
515 | } | 515 | } |
516 | 516 | ||
517 | static void deferred_cad(struct work_struct *dummy) | 517 | static void deferred_cad(struct work_struct *dummy) |
518 | { | 518 | { |
519 | kernel_restart(NULL); | 519 | kernel_restart(NULL); |
520 | } | 520 | } |
521 | 521 | ||
522 | /* | 522 | /* |
523 | * This function gets called by ctrl-alt-del - ie the keyboard interrupt. | 523 | * This function gets called by ctrl-alt-del - ie the keyboard interrupt. |
524 | * As it's called within an interrupt, it may NOT sync: the only choice | 524 | * As it's called within an interrupt, it may NOT sync: the only choice |
525 | * is whether to reboot at once, or just ignore the ctrl-alt-del. | 525 | * is whether to reboot at once, or just ignore the ctrl-alt-del. |
526 | */ | 526 | */ |
527 | void ctrl_alt_del(void) | 527 | void ctrl_alt_del(void) |
528 | { | 528 | { |
529 | static DECLARE_WORK(cad_work, deferred_cad); | 529 | static DECLARE_WORK(cad_work, deferred_cad); |
530 | 530 | ||
531 | if (C_A_D) | 531 | if (C_A_D) |
532 | schedule_work(&cad_work); | 532 | schedule_work(&cad_work); |
533 | else | 533 | else |
534 | kill_cad_pid(SIGINT, 1); | 534 | kill_cad_pid(SIGINT, 1); |
535 | } | 535 | } |
536 | 536 | ||
537 | /* | 537 | /* |
538 | * Unprivileged users may change the real gid to the effective gid | 538 | * Unprivileged users may change the real gid to the effective gid |
539 | * or vice versa. (BSD-style) | 539 | * or vice versa. (BSD-style) |
540 | * | 540 | * |
541 | * If you set the real gid at all, or set the effective gid to a value not | 541 | * If you set the real gid at all, or set the effective gid to a value not |
542 | * equal to the real gid, then the saved gid is set to the new effective gid. | 542 | * equal to the real gid, then the saved gid is set to the new effective gid. |
543 | * | 543 | * |
544 | * This makes it possible for a setgid program to completely drop its | 544 | * This makes it possible for a setgid program to completely drop its |
545 | * privileges, which is often a useful assertion to make when you are doing | 545 | * privileges, which is often a useful assertion to make when you are doing |
546 | * a security audit over a program. | 546 | * a security audit over a program. |
547 | * | 547 | * |
548 | * The general idea is that a program which uses just setregid() will be | 548 | * The general idea is that a program which uses just setregid() will be |
549 | * 100% compatible with BSD. A program which uses just setgid() will be | 549 | * 100% compatible with BSD. A program which uses just setgid() will be |
550 | * 100% compatible with POSIX with saved IDs. | 550 | * 100% compatible with POSIX with saved IDs. |
551 | * | 551 | * |
552 | * SMP: There are not races, the GIDs are checked only by filesystem | 552 | * SMP: There are not races, the GIDs are checked only by filesystem |
553 | * operations (as far as semantic preservation is concerned). | 553 | * operations (as far as semantic preservation is concerned). |
554 | */ | 554 | */ |
SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid;

	/* Translate the userspace gids into kernel gids in the caller's ns */
	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);

	/* (gid_t)-1 means "leave unchanged"; anything else must map cleanly */
	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		/* rgid may be set to the current real or effective gid,
		 * or to anything when the caller has CAP_SETGID */
		if (gid_eq(old->gid, krgid) ||
		    gid_eq(old->egid, krgid) ||
		    nsown_capable(CAP_SETGID))
			new->gid = krgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		/* egid may additionally be restored from the saved gid */
		if (gid_eq(old->gid, kegid) ||
		    gid_eq(old->egid, kegid) ||
		    gid_eq(old->sgid, kegid) ||
		    nsown_capable(CAP_SETGID))
			new->egid = kegid;
		else
			goto error;
	}

	/* BSD semantics: setting the rgid at all, or setting an egid
	 * different from the rgid, updates the saved gid too; fsgid
	 * always shadows the effective gid */
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
606 | 606 | ||
607 | /* | 607 | /* |
608 | * setgid() is implemented like SysV w/ SAVED_IDS | 608 | * setgid() is implemented like SysV w/ SAVED_IDS |
609 | * | 609 | * |
610 | * SMP: Same implicit races as above. | 610 | * SMP: Same implicit races as above. |
611 | */ | 611 | */ |
612 | SYSCALL_DEFINE1(setgid, gid_t, gid) | 612 | SYSCALL_DEFINE1(setgid, gid_t, gid) |
613 | { | 613 | { |
614 | struct user_namespace *ns = current_user_ns(); | 614 | struct user_namespace *ns = current_user_ns(); |
615 | const struct cred *old; | 615 | const struct cred *old; |
616 | struct cred *new; | 616 | struct cred *new; |
617 | int retval; | 617 | int retval; |
618 | kgid_t kgid; | 618 | kgid_t kgid; |
619 | 619 | ||
620 | kgid = make_kgid(ns, gid); | 620 | kgid = make_kgid(ns, gid); |
621 | if (!gid_valid(kgid)) | 621 | if (!gid_valid(kgid)) |
622 | return -EINVAL; | 622 | return -EINVAL; |
623 | 623 | ||
624 | new = prepare_creds(); | 624 | new = prepare_creds(); |
625 | if (!new) | 625 | if (!new) |
626 | return -ENOMEM; | 626 | return -ENOMEM; |
627 | old = current_cred(); | 627 | old = current_cred(); |
628 | 628 | ||
629 | retval = -EPERM; | 629 | retval = -EPERM; |
630 | if (nsown_capable(CAP_SETGID)) | 630 | if (nsown_capable(CAP_SETGID)) |
631 | new->gid = new->egid = new->sgid = new->fsgid = kgid; | 631 | new->gid = new->egid = new->sgid = new->fsgid = kgid; |
632 | else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid)) | 632 | else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid)) |
633 | new->egid = new->fsgid = kgid; | 633 | new->egid = new->fsgid = kgid; |
634 | else | 634 | else |
635 | goto error; | 635 | goto error; |
636 | 636 | ||
637 | return commit_creds(new); | 637 | return commit_creds(new); |
638 | 638 | ||
639 | error: | 639 | error: |
640 | abort_creds(new); | 640 | abort_creds(new); |
641 | return retval; | 641 | return retval; |
642 | } | 642 | } |
643 | 643 | ||
644 | /* | 644 | /* |
645 | * change the user struct in a credentials set to match the new UID | 645 | * change the user struct in a credentials set to match the new UID |
646 | */ | 646 | */ |
/*
 * change the user struct in a credentials set to match the new UID
 */
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	/* Take a reference on the per-uid accounting structure */
	new_user = alloc_uid(new->uid);
	if (!new_user)
		return -EAGAIN;

	/*
	 * We don't fail in case of NPROC limit excess here because too many
	 * poorly written programs don't check set*uid() return code, assuming
	 * it never fails if called by root. We may still enforce NPROC limit
	 * for programs doing set*uid()+execve() by harmlessly deferring the
	 * failure to the execve() stage.
	 */
	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
			new_user != INIT_USER)
		current->flags |= PF_NPROC_EXCEEDED;
	else
		current->flags &= ~PF_NPROC_EXCEEDED;

	/* Drop the reference on the old user struct and install the new one */
	free_uid(new->user);
	new->user = new_user;
	return 0;
}
672 | 672 | ||
673 | /* | 673 | /* |
674 | * Unprivileged users may change the real uid to the effective uid | 674 | * Unprivileged users may change the real uid to the effective uid |
675 | * or vice versa. (BSD-style) | 675 | * or vice versa. (BSD-style) |
676 | * | 676 | * |
677 | * If you set the real uid at all, or set the effective uid to a value not | 677 | * If you set the real uid at all, or set the effective uid to a value not |
678 | * equal to the real uid, then the saved uid is set to the new effective uid. | 678 | * equal to the real uid, then the saved uid is set to the new effective uid. |
679 | * | 679 | * |
680 | * This makes it possible for a setuid program to completely drop its | 680 | * This makes it possible for a setuid program to completely drop its |
681 | * privileges, which is often a useful assertion to make when you are doing | 681 | * privileges, which is often a useful assertion to make when you are doing |
682 | * a security audit over a program. | 682 | * a security audit over a program. |
683 | * | 683 | * |
684 | * The general idea is that a program which uses just setreuid() will be | 684 | * The general idea is that a program which uses just setreuid() will be |
685 | * 100% compatible with BSD. A program which uses just setuid() will be | 685 | * 100% compatible with BSD. A program which uses just setuid() will be |
686 | * 100% compatible with POSIX with saved IDs. | 686 | * 100% compatible with POSIX with saved IDs. |
687 | */ | 687 | */ |
SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid;

	/* Translate the userspace uids into kernel uids in the caller's ns */
	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);

	/* (uid_t)-1 means "leave unchanged"; anything else must map cleanly */
	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;
	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		/* ruid may be set to the current real or effective uid,
		 * or to anything when the caller has CAP_SETUID */
		if (!uid_eq(old->uid, kruid) &&
		    !uid_eq(old->euid, kruid) &&
		    !nsown_capable(CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		new->euid = keuid;
		/* euid may additionally be restored from the saved uid */
		if (!uid_eq(old->uid, keuid) &&
		    !uid_eq(old->euid, keuid) &&
		    !uid_eq(old->suid, keuid) &&
		    !nsown_capable(CAP_SETUID))
			goto error;
	}

	/* Switch the per-user accounting if the real uid changed */
	if (!uid_eq(new->uid, old->uid)) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	/* BSD semantics: setting the ruid at all, or setting an euid
	 * different from the ruid, updates the saved uid too; fsuid
	 * always shadows the effective uid */
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
		new->suid = new->euid;
	new->fsuid = new->euid;

	/* Let the LSM veto or adjust the credential transition */
	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
747 | 747 | ||
748 | /* | 748 | /* |
749 | * setuid() is implemented like SysV with SAVED_IDS | 749 | * setuid() is implemented like SysV with SAVED_IDS |
750 | * | 750 | * |
751 | * Note that SAVED_ID's is deficient in that a setuid root program | 751 | * Note that SAVED_ID's is deficient in that a setuid root program |
752 | * like sendmail, for example, cannot set its uid to be a normal | 752 | * like sendmail, for example, cannot set its uid to be a normal |
753 | * user and then switch back, because if you're root, setuid() sets | 753 | * user and then switch back, because if you're root, setuid() sets |
754 | * the saved uid too. If you don't like this, blame the bright people | 754 | * the saved uid too. If you don't like this, blame the bright people |
755 | * in the POSIX committee and/or USG. Note that the BSD-style setreuid() | 755 | * in the POSIX committee and/or USG. Note that the BSD-style setreuid() |
756 | * will allow a root program to temporarily drop privileges and be able to | 756 | * will allow a root program to temporarily drop privileges and be able to |
757 | * regain them by swapping the real and effective uid. | 757 | * regain them by swapping the real and effective uid. |
758 | */ | 758 | */ |
SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kuid;

	/* The requested uid must map into the caller's namespace */
	kuid = make_kuid(ns, uid);
	if (!uid_valid(kuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (nsown_capable(CAP_SETUID)) {
		/* privileged: real and saved uid change too (SysV saved-IDs) */
		new->suid = new->uid = kuid;
		if (!uid_eq(kuid, old->uid)) {
			/* switch per-user accounting to the new real uid */
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
		/* unprivileged: only the real or saved uid may be assumed */
		goto error;
	}

	/* effective and fs uid always follow the requested uid */
	new->fsuid = new->euid = kuid;

	/* Let the LSM veto or adjust the credential transition */
	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
800 | 800 | ||
801 | 801 | ||
802 | /* | 802 | /* |
803 | * This function implements a generic ability to update ruid, euid, | 803 | * This function implements a generic ability to update ruid, euid, |
804 | * and suid. This allows you to implement the 4.4 compatible seteuid(). | 804 | * and suid. This allows you to implement the 4.4 compatible seteuid(). |
805 | */ | 805 | */ |
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid. This allows you to implement the 4.4 compatible seteuid().
 */
SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;

	/* Translate the userspace uids into kernel uids in the caller's ns */
	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
	ksuid = make_kuid(ns, suid);

	/* (uid_t)-1 means "leave unchanged"; anything else must map cleanly */
	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;

	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	old = current_cred();

	retval = -EPERM;
	if (!nsown_capable(CAP_SETUID)) {
		/* unprivileged: each requested uid must match one of the
		 * current real, effective or saved uids */
		if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
			goto error;
		if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
			goto error;
		if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
			goto error;
	}

	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(kruid, old->uid)) {
			/* switch per-user accounting to the new real uid */
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = keuid;
	if (suid != (uid_t) -1)
		new->suid = ksuid;
	/* fsuid always shadows the (possibly updated) effective uid */
	new->fsuid = new->euid;

	/* Let the LSM veto or adjust the credential transition */
	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
870 | 870 | ||
871 | SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp) | 871 | SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp) |
872 | { | 872 | { |
873 | const struct cred *cred = current_cred(); | 873 | const struct cred *cred = current_cred(); |
874 | int retval; | 874 | int retval; |
875 | uid_t ruid, euid, suid; | 875 | uid_t ruid, euid, suid; |
876 | 876 | ||
877 | ruid = from_kuid_munged(cred->user_ns, cred->uid); | 877 | ruid = from_kuid_munged(cred->user_ns, cred->uid); |
878 | euid = from_kuid_munged(cred->user_ns, cred->euid); | 878 | euid = from_kuid_munged(cred->user_ns, cred->euid); |
879 | suid = from_kuid_munged(cred->user_ns, cred->suid); | 879 | suid = from_kuid_munged(cred->user_ns, cred->suid); |
880 | 880 | ||
881 | if (!(retval = put_user(ruid, ruidp)) && | 881 | if (!(retval = put_user(ruid, ruidp)) && |
882 | !(retval = put_user(euid, euidp))) | 882 | !(retval = put_user(euid, euidp))) |
883 | retval = put_user(suid, suidp); | 883 | retval = put_user(suid, suidp); |
884 | 884 | ||
885 | return retval; | 885 | return retval; |
886 | } | 886 | } |
887 | 887 | ||
888 | /* | 888 | /* |
889 | * Same as above, but for rgid, egid, sgid. | 889 | * Same as above, but for rgid, egid, sgid. |
890 | */ | 890 | */ |
/*
 * Same as setresuid(), but for rgid, egid, sgid.
 */
SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid, ksgid;

	/* Translate the userspace gids into kernel gids in the caller's ns */
	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);
	ksgid = make_kgid(ns, sgid);

	/* (gid_t)-1 means "leave unchanged"; anything else must map cleanly */
	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;
	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (!nsown_capable(CAP_SETGID)) {
		/* unprivileged: each requested gid must match one of the
		 * current real, effective or saved gids */
		if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
			goto error;
		if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
			goto error;
		if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
			goto error;
	}

	if (rgid != (gid_t) -1)
		new->gid = krgid;
	if (egid != (gid_t) -1)
		new->egid = kegid;
	if (sgid != (gid_t) -1)
		new->sgid = ksgid;
	/* fsgid always shadows the (possibly updated) effective gid */
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
942 | 942 | ||
943 | SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp) | 943 | SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp) |
944 | { | 944 | { |
945 | const struct cred *cred = current_cred(); | 945 | const struct cred *cred = current_cred(); |
946 | int retval; | 946 | int retval; |
947 | gid_t rgid, egid, sgid; | 947 | gid_t rgid, egid, sgid; |
948 | 948 | ||
949 | rgid = from_kgid_munged(cred->user_ns, cred->gid); | 949 | rgid = from_kgid_munged(cred->user_ns, cred->gid); |
950 | egid = from_kgid_munged(cred->user_ns, cred->egid); | 950 | egid = from_kgid_munged(cred->user_ns, cred->egid); |
951 | sgid = from_kgid_munged(cred->user_ns, cred->sgid); | 951 | sgid = from_kgid_munged(cred->user_ns, cred->sgid); |
952 | 952 | ||
953 | if (!(retval = put_user(rgid, rgidp)) && | 953 | if (!(retval = put_user(rgid, rgidp)) && |
954 | !(retval = put_user(egid, egidp))) | 954 | !(retval = put_user(egid, egidp))) |
955 | retval = put_user(sgid, sgidp); | 955 | retval = put_user(sgid, sgidp); |
956 | 956 | ||
957 | return retval; | 957 | return retval; |
958 | } | 958 | } |
959 | 959 | ||
960 | 960 | ||
961 | /* | 961 | /* |
962 | * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This | 962 | * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This |
963 | * is used for "access()" and for the NFS daemon (letting nfsd stay at | 963 | * is used for "access()" and for the NFS daemon (letting nfsd stay at |
964 | * whatever uid it wants to). It normally shadows "euid", except when | 964 | * whatever uid it wants to). It normally shadows "euid", except when |
965 | * explicitly set by setfsuid() or for access.. | 965 | * explicitly set by setfsuid() or for access.. |
966 | */ | 966 | */ |
SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
	const struct cred *old;
	struct cred *new;
	uid_t old_fsuid;
	kuid_t kuid;

	old = current_cred();
	/* The return value is always the previous fsuid, even on failure */
	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);

	kuid = make_kuid(old->user_ns, uid);
	if (!uid_valid(kuid))
		return old_fsuid;

	new = prepare_creds();
	if (!new)
		return old_fsuid;

	/* fsuid may be set to any of the current uids, or to anything
	 * when the caller has CAP_SETUID */
	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid) ||
	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
	    nsown_capable(CAP_SETUID)) {
		if (!uid_eq(kuid, old->fsuid)) {
			new->fsuid = kuid;
			/* the LSM may still veto the change */
			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsuid;

change_okay:
	commit_creds(new);
	return old_fsuid;
}
1002 | 1002 | ||
1003 | /* | 1003 | /* |
1004 | * Samma pรฅ svenska.. | 1004 | * Samma pรฅ svenska.. |
1005 | */ | 1005 | */ |
SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
	const struct cred *old;
	struct cred *new;
	gid_t old_fsgid;
	kgid_t kgid;

	old = current_cred();
	/* The return value is always the previous fsgid, even on failure */
	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);

	kgid = make_kgid(old->user_ns, gid);
	if (!gid_valid(kgid))
		return old_fsgid;

	new = prepare_creds();
	if (!new)
		return old_fsgid;

	/* fsgid may be set to any of the current gids, or to anything
	 * when the caller has CAP_SETGID */
	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid) ||
	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
	    nsown_capable(CAP_SETGID)) {
		if (!gid_eq(kgid, old->fsgid)) {
			new->fsgid = kgid;
			goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsgid;

change_okay:
	commit_creds(new);
	return old_fsgid;
}
1040 | 1040 | ||
1041 | void do_sys_times(struct tms *tms) | 1041 | void do_sys_times(struct tms *tms) |
1042 | { | 1042 | { |
1043 | cputime_t tgutime, tgstime, cutime, cstime; | 1043 | cputime_t tgutime, tgstime, cutime, cstime; |
1044 | 1044 | ||
1045 | spin_lock_irq(¤t->sighand->siglock); | 1045 | spin_lock_irq(¤t->sighand->siglock); |
1046 | thread_group_times(current, &tgutime, &tgstime); | 1046 | thread_group_times(current, &tgutime, &tgstime); |
1047 | cutime = current->signal->cutime; | 1047 | cutime = current->signal->cutime; |
1048 | cstime = current->signal->cstime; | 1048 | cstime = current->signal->cstime; |
1049 | spin_unlock_irq(¤t->sighand->siglock); | 1049 | spin_unlock_irq(¤t->sighand->siglock); |
1050 | tms->tms_utime = cputime_to_clock_t(tgutime); | 1050 | tms->tms_utime = cputime_to_clock_t(tgutime); |
1051 | tms->tms_stime = cputime_to_clock_t(tgstime); | 1051 | tms->tms_stime = cputime_to_clock_t(tgstime); |
1052 | tms->tms_cutime = cputime_to_clock_t(cutime); | 1052 | tms->tms_cutime = cputime_to_clock_t(cutime); |
1053 | tms->tms_cstime = cputime_to_clock_t(cstime); | 1053 | tms->tms_cstime = cputime_to_clock_t(cstime); |
1054 | } | 1054 | } |
1055 | 1055 | ||
1056 | SYSCALL_DEFINE1(times, struct tms __user *, tbuf) | 1056 | SYSCALL_DEFINE1(times, struct tms __user *, tbuf) |
1057 | { | 1057 | { |
1058 | if (tbuf) { | 1058 | if (tbuf) { |
1059 | struct tms tmp; | 1059 | struct tms tmp; |
1060 | 1060 | ||
1061 | do_sys_times(&tmp); | 1061 | do_sys_times(&tmp); |
1062 | if (copy_to_user(tbuf, &tmp, sizeof(struct tms))) | 1062 | if (copy_to_user(tbuf, &tmp, sizeof(struct tms))) |
1063 | return -EFAULT; | 1063 | return -EFAULT; |
1064 | } | 1064 | } |
1065 | force_successful_syscall_return(); | 1065 | force_successful_syscall_return(); |
1066 | return (long) jiffies_64_to_clock_t(get_jiffies_64()); | 1066 | return (long) jiffies_64_to_clock_t(get_jiffies_64()); |
1067 | } | 1067 | } |
1068 | 1068 | ||
1069 | /* | 1069 | /* |
1070 | * This needs some heavy checking ... | 1070 | * This needs some heavy checking ... |
1071 | * I just haven't the stomach for it. I also don't fully | 1071 | * I just haven't the stomach for it. I also don't fully |
1072 | * understand sessions/pgrp etc. Let somebody who does explain it. | 1072 | * understand sessions/pgrp etc. Let somebody who does explain it. |
1073 | * | 1073 | * |
1074 | * OK, I think I have the protection semantics right.... this is really | 1074 | * OK, I think I have the protection semantics right.... this is really |
1075 | * only important on a multi-user system anyway, to make sure one user | 1075 | * only important on a multi-user system anyway, to make sure one user |
1076 | * can't send a signal to a process owned by another. -TYT, 12/12/91 | 1076 | * can't send a signal to a process owned by another. -TYT, 12/12/91 |
1077 | * | 1077 | * |
1078 | * Auch. Had to add the 'did_exec' flag to conform completely to POSIX. | 1078 | * Auch. Had to add the 'did_exec' flag to conform completely to POSIX. |
1079 | * LBT 04.03.94 | 1079 | * LBT 04.03.94 |
1080 | */ | 1080 | */ |
/* setpgid(2): move process @pid into process group @pgid. */
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	/* pid == 0 means the caller; pgid == 0 means pgid == pid (POSIX). */
	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;
	rcu_read_lock();

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	/* Only thread group leaders may change process group. */
	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		/* Moving one of our children: it must be in our session
		 * and must not have exec'ed yet. */
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		/* Otherwise we may only move ourselves. */
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	/* Session leaders may never change their process group. */
	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		/* Joining an existing group: it must already have a
		 * member in the caller's session. */
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp)
		change_pid(p, PIDTYPE_PGID, pgrp);

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	rcu_read_unlock();
	return err;
}
1151 | 1151 | ||
/*
 * getpgid(2): return the process group id of @pid (0 means the calling
 * process), translated into the caller's pid namespace.
 */
SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	/* RCU protects the task and pid lookups below. */
	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}
1179 | 1179 | ||
1180 | #ifdef __ARCH_WANT_SYS_GETPGRP | 1180 | #ifdef __ARCH_WANT_SYS_GETPGRP |
1181 | 1181 | ||
/* getpgrp(2) is equivalent to getpgid(0). */
SYSCALL_DEFINE0(getpgrp)
{
	return sys_getpgid(0);
}
1186 | 1186 | ||
1187 | #endif | 1187 | #endif |
1188 | 1188 | ||
/*
 * getsid(2): return the session id of @pid (0 means the calling
 * process), translated into the caller's pid namespace.
 */
SYSCALL_DEFINE1(getsid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	/* RCU protects the task and pid lookups below. */
	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}
1216 | 1216 | ||
/*
 * setsid(2): make the calling thread group leader the leader of a new
 * session (and of a new process group with the same id).  Returns the
 * new session id on success, -EPERM on failure.
 */
SYSCALL_DEFINE0(setsid)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(sid);

	/* A new session starts with no controlling terminal. */
	proc_clear_tty(group_leader);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	/* Notify userspace listeners only after dropping the lock. */
	if (err > 0) {
		proc_sid_connector(group_leader);
		sched_autogroup_create_attach(group_leader);
	}
	return err;
}
1249 | 1249 | ||
1250 | DECLARE_RWSEM(uts_sem); | 1250 | DECLARE_RWSEM(uts_sem); |
1251 | 1251 | ||
/*
 * When a PER_LINUX32 personality runs on a kernel that defines
 * COMPAT_UTS_MACHINE, report the compat machine string (e.g. "i686"
 * instead of "x86_64") in uname results.  Evaluates non-zero when the
 * copy_to_user() of the override fails.
 */
#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
	(personality(current->personality) == PER_LINUX32 && \
	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
		      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)	0
#endif
1260 | 1260 | ||
1261 | /* | 1261 | /* |
1262 | * Work around broken programs that cannot handle "Linux 3.0". | 1262 | * Work around broken programs that cannot handle "Linux 3.0". |
1263 | * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40 | 1263 | * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40 |
1264 | */ | 1264 | */ |
1265 | static int override_release(char __user *release, int len) | 1265 | static int override_release(char __user *release, int len) |
1266 | { | 1266 | { |
1267 | int ret = 0; | 1267 | int ret = 0; |
1268 | char buf[65]; | 1268 | char buf[65]; |
1269 | 1269 | ||
1270 | if (current->personality & UNAME26) { | 1270 | if (current->personality & UNAME26) { |
1271 | char *rest = UTS_RELEASE; | 1271 | char *rest = UTS_RELEASE; |
1272 | int ndots = 0; | 1272 | int ndots = 0; |
1273 | unsigned v; | 1273 | unsigned v; |
1274 | 1274 | ||
1275 | while (*rest) { | 1275 | while (*rest) { |
1276 | if (*rest == '.' && ++ndots >= 3) | 1276 | if (*rest == '.' && ++ndots >= 3) |
1277 | break; | 1277 | break; |
1278 | if (!isdigit(*rest) && *rest != '.') | 1278 | if (!isdigit(*rest) && *rest != '.') |
1279 | break; | 1279 | break; |
1280 | rest++; | 1280 | rest++; |
1281 | } | 1281 | } |
1282 | v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40; | 1282 | v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40; |
1283 | snprintf(buf, len, "2.6.%u%s", v, rest); | 1283 | snprintf(buf, len, "2.6.%u%s", v, rest); |
1284 | ret = copy_to_user(release, buf, len); | 1284 | ret = copy_to_user(release, buf, len); |
1285 | } | 1285 | } |
1286 | return ret; | 1286 | return ret; |
1287 | } | 1287 | } |
1288 | 1288 | ||
1289 | SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) | 1289 | SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) |
1290 | { | 1290 | { |
1291 | int errno = 0; | 1291 | int errno = 0; |
1292 | 1292 | ||
1293 | down_read(&uts_sem); | 1293 | down_read(&uts_sem); |
1294 | if (copy_to_user(name, utsname(), sizeof *name)) | 1294 | if (copy_to_user(name, utsname(), sizeof *name)) |
1295 | errno = -EFAULT; | 1295 | errno = -EFAULT; |
1296 | up_read(&uts_sem); | 1296 | up_read(&uts_sem); |
1297 | 1297 | ||
1298 | if (!errno && override_release(name->release, sizeof(name->release))) | 1298 | if (!errno && override_release(name->release, sizeof(name->release))) |
1299 | errno = -EFAULT; | 1299 | errno = -EFAULT; |
1300 | if (!errno && override_architecture(name)) | 1300 | if (!errno && override_architecture(name)) |
1301 | errno = -EFAULT; | 1301 | errno = -EFAULT; |
1302 | return errno; | 1302 | return errno; |
1303 | } | 1303 | } |
1304 | 1304 | ||
1305 | #ifdef __ARCH_WANT_SYS_OLD_UNAME | 1305 | #ifdef __ARCH_WANT_SYS_OLD_UNAME |
1306 | /* | 1306 | /* |
1307 | * Old cruft | 1307 | * Old cruft |
1308 | */ | 1308 | */ |
1309 | SYSCALL_DEFINE1(uname, struct old_utsname __user *, name) | 1309 | SYSCALL_DEFINE1(uname, struct old_utsname __user *, name) |
1310 | { | 1310 | { |
1311 | int error = 0; | 1311 | int error = 0; |
1312 | 1312 | ||
1313 | if (!name) | 1313 | if (!name) |
1314 | return -EFAULT; | 1314 | return -EFAULT; |
1315 | 1315 | ||
1316 | down_read(&uts_sem); | 1316 | down_read(&uts_sem); |
1317 | if (copy_to_user(name, utsname(), sizeof(*name))) | 1317 | if (copy_to_user(name, utsname(), sizeof(*name))) |
1318 | error = -EFAULT; | 1318 | error = -EFAULT; |
1319 | up_read(&uts_sem); | 1319 | up_read(&uts_sem); |
1320 | 1320 | ||
1321 | if (!error && override_release(name->release, sizeof(name->release))) | 1321 | if (!error && override_release(name->release, sizeof(name->release))) |
1322 | error = -EFAULT; | 1322 | error = -EFAULT; |
1323 | if (!error && override_architecture(name)) | 1323 | if (!error && override_architecture(name)) |
1324 | error = -EFAULT; | 1324 | error = -EFAULT; |
1325 | return error; | 1325 | return error; |
1326 | } | 1326 | } |
1327 | 1327 | ||
/*
 * olduname(2): the oldest uname variant, using fixed __OLD_UTS_LEN
 * sized fields.  Each field is copied and explicitly NUL-terminated;
 * individual copy results are OR-ed together and collapsed to -EFAULT.
 */
SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
	int error;

	if (!name)
		return -EFAULT;
	/* One access_ok() check lets us use the __-prefixed copies below. */
	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
		return -EFAULT;

	down_read(&uts_sem);
	error = __copy_to_user(&name->sysname, &utsname()->sysname,
			       __OLD_UTS_LEN);
	error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->nodename, &utsname()->nodename,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->release, &utsname()->release,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->release + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->version, &utsname()->version,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->version + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->machine, &utsname()->machine,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->machine + __OLD_UTS_LEN);
	up_read(&uts_sem);

	if (!error && override_architecture(name))
		error = -EFAULT;
	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	return error ? -EFAULT : 0;
}
1361 | #endif | 1361 | #endif |
1362 | 1362 | ||
/*
 * sethostname(2): set the UTS nodename of the caller's UTS namespace.
 * Requires CAP_SYS_ADMIN in the owning user namespace.
 */
SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	/* Copy into a kernel buffer first so a fault can't leave a
	 * half-written nodename behind. */
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->nodename, tmp, len);
		/* Zero-fill the remainder so the stored name is terminated. */
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_HOSTNAME);
	}
	up_write(&uts_sem);
	return errno;
}
1386 | 1386 | ||
1387 | #ifdef __ARCH_WANT_SYS_GETHOSTNAME | 1387 | #ifdef __ARCH_WANT_SYS_GETHOSTNAME |
1388 | 1388 | ||
1389 | SYSCALL_DEFINE2(gethostname, char __user *, name, int, len) | 1389 | SYSCALL_DEFINE2(gethostname, char __user *, name, int, len) |
1390 | { | 1390 | { |
1391 | int i, errno; | 1391 | int i, errno; |
1392 | struct new_utsname *u; | 1392 | struct new_utsname *u; |
1393 | 1393 | ||
1394 | if (len < 0) | 1394 | if (len < 0) |
1395 | return -EINVAL; | 1395 | return -EINVAL; |
1396 | down_read(&uts_sem); | 1396 | down_read(&uts_sem); |
1397 | u = utsname(); | 1397 | u = utsname(); |
1398 | i = 1 + strlen(u->nodename); | 1398 | i = 1 + strlen(u->nodename); |
1399 | if (i > len) | 1399 | if (i > len) |
1400 | i = len; | 1400 | i = len; |
1401 | errno = 0; | 1401 | errno = 0; |
1402 | if (copy_to_user(name, u->nodename, i)) | 1402 | if (copy_to_user(name, u->nodename, i)) |
1403 | errno = -EFAULT; | 1403 | errno = -EFAULT; |
1404 | up_read(&uts_sem); | 1404 | up_read(&uts_sem); |
1405 | return errno; | 1405 | return errno; |
1406 | } | 1406 | } |
1407 | 1407 | ||
1408 | #endif | 1408 | #endif |
1409 | 1409 | ||
1410 | /* | 1410 | /* |
1411 | * Only setdomainname; getdomainname can be implemented by calling | 1411 | * Only setdomainname; getdomainname can be implemented by calling |
1412 | * uname() | 1412 | * uname() |
1413 | */ | 1413 | */ |
/*
 * setdomainname(2): set the NIS domainname of the caller's UTS
 * namespace.  Requires CAP_SYS_ADMIN in the owning user namespace.
 */
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	/* Copy into a kernel buffer first so a fault can't leave a
	 * half-written domainname behind. */
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->domainname, tmp, len);
		/* Zero-fill the remainder so the stored name is terminated. */
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_DOMAINNAME);
	}
	up_write(&uts_sem);
	return errno;
}
1437 | 1437 | ||
1438 | SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim) | 1438 | SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim) |
1439 | { | 1439 | { |
1440 | struct rlimit value; | 1440 | struct rlimit value; |
1441 | int ret; | 1441 | int ret; |
1442 | 1442 | ||
1443 | ret = do_prlimit(current, resource, NULL, &value); | 1443 | ret = do_prlimit(current, resource, NULL, &value); |
1444 | if (!ret) | 1444 | if (!ret) |
1445 | ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0; | 1445 | ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0; |
1446 | 1446 | ||
1447 | return ret; | 1447 | return ret; |
1448 | } | 1448 | } |
1449 | 1449 | ||
1450 | #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT | 1450 | #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT |
1451 | 1451 | ||
1452 | /* | 1452 | /* |
1453 | * Back compatibility for getrlimit. Needed for some apps. | 1453 | * Back compatibility for getrlimit. Needed for some apps. |
1454 | */ | 1454 | */ |
1455 | 1455 | ||
1456 | SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, | 1456 | SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, |
1457 | struct rlimit __user *, rlim) | 1457 | struct rlimit __user *, rlim) |
1458 | { | 1458 | { |
1459 | struct rlimit x; | 1459 | struct rlimit x; |
1460 | if (resource >= RLIM_NLIMITS) | 1460 | if (resource >= RLIM_NLIMITS) |
1461 | return -EINVAL; | 1461 | return -EINVAL; |
1462 | 1462 | ||
1463 | task_lock(current->group_leader); | 1463 | task_lock(current->group_leader); |
1464 | x = current->signal->rlim[resource]; | 1464 | x = current->signal->rlim[resource]; |
1465 | task_unlock(current->group_leader); | 1465 | task_unlock(current->group_leader); |
1466 | if (x.rlim_cur > 0x7FFFFFFF) | 1466 | if (x.rlim_cur > 0x7FFFFFFF) |
1467 | x.rlim_cur = 0x7FFFFFFF; | 1467 | x.rlim_cur = 0x7FFFFFFF; |
1468 | if (x.rlim_max > 0x7FFFFFFF) | 1468 | if (x.rlim_max > 0x7FFFFFFF) |
1469 | x.rlim_max = 0x7FFFFFFF; | 1469 | x.rlim_max = 0x7FFFFFFF; |
1470 | return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0; | 1470 | return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0; |
1471 | } | 1471 | } |
1472 | 1472 | ||
1473 | #endif | 1473 | #endif |
1474 | 1474 | ||
/*
 * On 32-bit kernels rlim_t is unsigned long, so any 64-bit value at or
 * above ULONG_MAX must be treated as "unlimited"; on 64-bit kernels
 * only the exact RLIM64_INFINITY sentinel means infinity.
 */
static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
	return rlim64 >= ULONG_MAX;
#else
	return rlim64 == RLIM64_INFINITY;
#endif
}
1483 | 1483 | ||
1484 | static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64) | 1484 | static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64) |
1485 | { | 1485 | { |
1486 | if (rlim->rlim_cur == RLIM_INFINITY) | 1486 | if (rlim->rlim_cur == RLIM_INFINITY) |
1487 | rlim64->rlim_cur = RLIM64_INFINITY; | 1487 | rlim64->rlim_cur = RLIM64_INFINITY; |
1488 | else | 1488 | else |
1489 | rlim64->rlim_cur = rlim->rlim_cur; | 1489 | rlim64->rlim_cur = rlim->rlim_cur; |
1490 | if (rlim->rlim_max == RLIM_INFINITY) | 1490 | if (rlim->rlim_max == RLIM_INFINITY) |
1491 | rlim64->rlim_max = RLIM64_INFINITY; | 1491 | rlim64->rlim_max = RLIM64_INFINITY; |
1492 | else | 1492 | else |
1493 | rlim64->rlim_max = rlim->rlim_max; | 1493 | rlim64->rlim_max = rlim->rlim_max; |
1494 | } | 1494 | } |
1495 | 1495 | ||
1496 | static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim) | 1496 | static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim) |
1497 | { | 1497 | { |
1498 | if (rlim64_is_infinity(rlim64->rlim_cur)) | 1498 | if (rlim64_is_infinity(rlim64->rlim_cur)) |
1499 | rlim->rlim_cur = RLIM_INFINITY; | 1499 | rlim->rlim_cur = RLIM_INFINITY; |
1500 | else | 1500 | else |
1501 | rlim->rlim_cur = (unsigned long)rlim64->rlim_cur; | 1501 | rlim->rlim_cur = (unsigned long)rlim64->rlim_cur; |
1502 | if (rlim64_is_infinity(rlim64->rlim_max)) | 1502 | if (rlim64_is_infinity(rlim64->rlim_max)) |
1503 | rlim->rlim_max = RLIM_INFINITY; | 1503 | rlim->rlim_max = RLIM_INFINITY; |
1504 | else | 1504 | else |
1505 | rlim->rlim_max = (unsigned long)rlim64->rlim_max; | 1505 | rlim->rlim_max = (unsigned long)rlim64->rlim_max; |
1506 | } | 1506 | } |
1507 | 1507 | ||
/* make sure you are allowed to change @tsk limits before calling this */
int do_prlimit(struct task_struct *tsk, unsigned int resource,
		struct rlimit *new_rlim, struct rlimit *old_rlim)
{
	struct rlimit *rlim;
	int retval = 0;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (new_rlim) {
		if (new_rlim->rlim_cur > new_rlim->rlim_max)
			return -EINVAL;
		/* NOFILE hard limits are additionally capped by nr_open. */
		if (resource == RLIMIT_NOFILE &&
				new_rlim->rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	/* protect tsk->signal and tsk->sighand from disappearing */
	read_lock(&tasklist_lock);
	if (!tsk->sighand) {
		/* The target task has already been released. */
		retval = -ESRCH;
		goto out;
	}

	rlim = tsk->signal->rlim + resource;
	task_lock(tsk->group_leader);
	if (new_rlim) {
		/* Keep the capable check against init_user_ns until
		   cgroups can contain all limits */
		if (new_rlim->rlim_max > rlim->rlim_max &&
				!capable(CAP_SYS_RESOURCE))
			retval = -EPERM;
		if (!retval)
			retval = security_task_setrlimit(tsk->group_leader,
					resource, new_rlim);
		if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
			/*
			 * The caller is asking for an immediate RLIMIT_CPU
			 * expiry. But we use the zero value to mean "it was
			 * never set". So let's cheat and make it one second
			 * instead
			 */
			new_rlim->rlim_cur = 1;
		}
	}
	if (!retval) {
		/* Read the old value and/or install the new one while
		 * still holding the group leader's task lock. */
		if (old_rlim)
			*old_rlim = *rlim;
		if (new_rlim)
			*rlim = *new_rlim;
	}
	task_unlock(tsk->group_leader);

	/*
	 * RLIMIT_CPU handling.   Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	 if (!retval && new_rlim && resource == RLIMIT_CPU &&
			 new_rlim->rlim_cur != RLIM_INFINITY)
		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
out:
	read_unlock(&tasklist_lock);
	return retval;
}
1574 | 1574 | ||
1575 | /* rcu lock must be held */ | 1575 | /* rcu lock must be held */ |
1576 | static int check_prlimit_permission(struct task_struct *task) | 1576 | static int check_prlimit_permission(struct task_struct *task) |
1577 | { | 1577 | { |
1578 | const struct cred *cred = current_cred(), *tcred; | 1578 | const struct cred *cred = current_cred(), *tcred; |
1579 | 1579 | ||
1580 | if (current == task) | 1580 | if (current == task) |
1581 | return 0; | 1581 | return 0; |
1582 | 1582 | ||
1583 | tcred = __task_cred(task); | 1583 | tcred = __task_cred(task); |
1584 | if (uid_eq(cred->uid, tcred->euid) && | 1584 | if (uid_eq(cred->uid, tcred->euid) && |
1585 | uid_eq(cred->uid, tcred->suid) && | 1585 | uid_eq(cred->uid, tcred->suid) && |
1586 | uid_eq(cred->uid, tcred->uid) && | 1586 | uid_eq(cred->uid, tcred->uid) && |
1587 | gid_eq(cred->gid, tcred->egid) && | 1587 | gid_eq(cred->gid, tcred->egid) && |
1588 | gid_eq(cred->gid, tcred->sgid) && | 1588 | gid_eq(cred->gid, tcred->sgid) && |
1589 | gid_eq(cred->gid, tcred->gid)) | 1589 | gid_eq(cred->gid, tcred->gid)) |
1590 | return 0; | 1590 | return 0; |
1591 | if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE)) | 1591 | if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE)) |
1592 | return 0; | 1592 | return 0; |
1593 | 1593 | ||
1594 | return -EPERM; | 1594 | return -EPERM; |
1595 | } | 1595 | } |
1596 | 1596 | ||
/*
 * prlimit64 - read and/or write a resource limit of an arbitrary task.
 * @pid:      target task (0 means the calling task)
 * @resource: RLIMIT_* index
 * @new_rlim: new limits to install, or NULL to only read
 * @old_rlim: where to return the previous limits, or NULL
 *
 * Returns 0 on success; -EFAULT on bad user pointers, -ESRCH if no
 * such task, -EPERM if check_prlimit_permission() refuses, or the
 * error from do_prlimit().
 */
SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
		const struct rlimit64 __user *, new_rlim,
		struct rlimit64 __user *, old_rlim)
{
	struct rlimit64 old64, new64;
	struct rlimit old, new;
	struct task_struct *tsk;
	int ret;

	/* Copy the new limits in before taking any locks. */
	if (new_rlim) {
		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
			return -EFAULT;
		rlim64_to_rlim(&new64, &new);
	}

	rcu_read_lock();
	tsk = pid ? find_task_by_vpid(pid) : current;
	if (!tsk) {
		rcu_read_unlock();
		return -ESRCH;
	}
	ret = check_prlimit_permission(tsk);
	if (ret) {
		rcu_read_unlock();
		return ret;
	}
	/* Pin the task so it cannot be freed once we drop the RCU lock. */
	get_task_struct(tsk);
	rcu_read_unlock();

	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
			old_rlim ? &old : NULL);

	/* Copy the previous limits out only if the operation succeeded. */
	if (!ret && old_rlim) {
		rlim_to_rlim64(&old, &old64);
		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
			ret = -EFAULT;
	}

	put_task_struct(tsk);
	return ret;
}
1638 | 1638 | ||
1639 | SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) | 1639 | SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) |
1640 | { | 1640 | { |
1641 | struct rlimit new_rlim; | 1641 | struct rlimit new_rlim; |
1642 | 1642 | ||
1643 | if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) | 1643 | if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) |
1644 | return -EFAULT; | 1644 | return -EFAULT; |
1645 | return do_prlimit(current, resource, &new_rlim, NULL); | 1645 | return do_prlimit(current, resource, &new_rlim, NULL); |
1646 | } | 1646 | } |
1647 | 1647 | ||
1648 | /* | 1648 | /* |
1649 | * It would make sense to put struct rusage in the task_struct, | 1649 | * It would make sense to put struct rusage in the task_struct, |
1650 | * except that would make the task_struct be *really big*. After | 1650 | * except that would make the task_struct be *really big*. After |
1651 | * task_struct gets moved into malloc'ed memory, it would | 1651 | * task_struct gets moved into malloc'ed memory, it would |
1652 | * make sense to do this. It will make moving the rest of the information | 1652 | * make sense to do this. It will make moving the rest of the information |
1653 | * a lot simpler! (Which we're not doing right now because we're not | 1653 | * a lot simpler! (Which we're not doing right now because we're not |
1654 | * measuring them yet). | 1654 | * measuring them yet). |
1655 | * | 1655 | * |
1656 | * When sampling multiple threads for RUSAGE_SELF, under SMP we might have | 1656 | * When sampling multiple threads for RUSAGE_SELF, under SMP we might have |
1657 | * races with threads incrementing their own counters. But since word | 1657 | * races with threads incrementing their own counters. But since word |
1658 | * reads are atomic, we either get new values or old values and we don't | 1658 | * reads are atomic, we either get new values or old values and we don't |
1659 | * care which for the sums. We always take the siglock to protect reading | 1659 | * care which for the sums. We always take the siglock to protect reading |
1660 | * the c* fields from p->signal from races with exit.c updating those | 1660 | * the c* fields from p->signal from races with exit.c updating those |
1661 | * fields when reaping, so a sample either gets all the additions of a | 1661 | * fields when reaping, so a sample either gets all the additions of a |
1662 | * given child after it's reaped, or none so this sample is before reaping. | 1662 | * given child after it's reaped, or none so this sample is before reaping. |
1663 | * | 1663 | * |
1664 | * Locking: | 1664 | * Locking: |
1665 | * We need to take the siglock for CHILDREN, SELF and BOTH | 1665 | * We need to take the siglock for CHILDREN, SELF and BOTH |
1666 | * for the cases current multithreaded, non-current single threaded | 1666 | * for the cases current multithreaded, non-current single threaded |
1667 | * non-current multithreaded. Thread traversal is now safe with | 1667 | * non-current multithreaded. Thread traversal is now safe with |
1668 | * the siglock held. | 1668 | * the siglock held. |
1669 | * Strictly speaking, we do not need to take the siglock if we are current and | 1669 | * Strictly speaking, we do not need to take the siglock if we are current and |
1670 | * single threaded, as no one else can take our signal_struct away, no one | 1670 | * single threaded, as no one else can take our signal_struct away, no one |
1671 | * else can reap the children to update signal->c* counters, and no one else | 1671 | * else can reap the children to update signal->c* counters, and no one else |
1672 | * can race with the signal-> fields. If we do not take any lock, the | 1672 | * can race with the signal-> fields. If we do not take any lock, the |
1673 | * signal-> fields could be read out of order while another thread was just | 1673 | * signal-> fields could be read out of order while another thread was just |
1674 | * exiting. So we should place a read memory barrier when we avoid the lock. | 1674 | * exiting. So we should place a read memory barrier when we avoid the lock. |
1675 | * On the writer side, write memory barrier is implied in __exit_signal | 1675 | * On the writer side, write memory barrier is implied in __exit_signal |
1676 | * as __exit_signal releases the siglock spinlock after updating the signal-> | 1676 | * as __exit_signal releases the siglock spinlock after updating the signal-> |
1677 | * fields. But we don't do this yet to keep things simple. | 1677 | * fields. But we don't do this yet to keep things simple. |
1678 | * | 1678 | * |
1679 | */ | 1679 | */ |
1680 | 1680 | ||
1681 | static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r) | 1681 | static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r) |
1682 | { | 1682 | { |
1683 | r->ru_nvcsw += t->nvcsw; | 1683 | r->ru_nvcsw += t->nvcsw; |
1684 | r->ru_nivcsw += t->nivcsw; | 1684 | r->ru_nivcsw += t->nivcsw; |
1685 | r->ru_minflt += t->min_flt; | 1685 | r->ru_minflt += t->min_flt; |
1686 | r->ru_majflt += t->maj_flt; | 1686 | r->ru_majflt += t->maj_flt; |
1687 | r->ru_inblock += task_io_get_inblock(t); | 1687 | r->ru_inblock += task_io_get_inblock(t); |
1688 | r->ru_oublock += task_io_get_oublock(t); | 1688 | r->ru_oublock += task_io_get_oublock(t); |
1689 | } | 1689 | } |
1690 | 1690 | ||
/*
 * k_getrusage - fill *r with the resource usage of @p according to @who
 * (RUSAGE_THREAD, RUSAGE_SELF, RUSAGE_CHILDREN or RUSAGE_BOTH).
 *
 * RUSAGE_THREAD is sampled locklessly from a single thread; all other
 * cases take the sighand lock so the signal->c* accumulators cannot
 * change while we read them (see the long comment above).
 */
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t tgutime, tgstime, utime, stime;
	unsigned long maxrss = 0;

	memset((char *) r, 0, sizeof *r);
	utime = stime = 0;

	if (who == RUSAGE_THREAD) {
		/*
		 * NOTE(review): times are taken from 'current' while the
		 * other counters come from @p; callers appear to pass
		 * p == current for RUSAGE_THREAD -- confirm.
		 */
		task_times(current, &utime, &stime);
		accumulate_thread_rusage(p, r);
		maxrss = p->signal->maxrss;
		goto out;
	}

	/* Task is exiting and its sighand is gone: leave *r zeroed. */
	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		/* Accumulated usage of already-reaped children. */
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;
		maxrss = p->signal->cmaxrss;

		if (who == RUSAGE_CHILDREN)
			break;
		/* RUSAGE_BOTH: fall through and add our own usage too. */

	case RUSAGE_SELF:
		thread_group_times(p, &tgutime, &tgstime);
		utime += tgutime;
		stime += tgstime;
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
		if (maxrss < p->signal->maxrss)
			maxrss = p->signal->maxrss;
		/* Add the per-thread counters of every live thread. */
		t = p;
		do {
			accumulate_thread_rusage(t, r);
			t = next_thread(t);
		} while (t != p);
		break;

	default:
		BUG();
	}
	unlock_task_sighand(p, &flags);

out:
	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);

	if (who != RUSAGE_CHILDREN) {
		struct mm_struct *mm = get_task_mm(p);
		if (mm) {
			setmax_mm_hiwater_rss(&maxrss, mm);
			mmput(mm);
		}
	}
	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
}
1764 | 1764 | ||
1765 | int getrusage(struct task_struct *p, int who, struct rusage __user *ru) | 1765 | int getrusage(struct task_struct *p, int who, struct rusage __user *ru) |
1766 | { | 1766 | { |
1767 | struct rusage r; | 1767 | struct rusage r; |
1768 | k_getrusage(p, who, &r); | 1768 | k_getrusage(p, who, &r); |
1769 | return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0; | 1769 | return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0; |
1770 | } | 1770 | } |
1771 | 1771 | ||
1772 | SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru) | 1772 | SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru) |
1773 | { | 1773 | { |
1774 | if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN && | 1774 | if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN && |
1775 | who != RUSAGE_THREAD) | 1775 | who != RUSAGE_THREAD) |
1776 | return -EINVAL; | 1776 | return -EINVAL; |
1777 | return getrusage(current, who, ru); | 1777 | return getrusage(current, who, ru); |
1778 | } | 1778 | } |
1779 | 1779 | ||
1780 | SYSCALL_DEFINE1(umask, int, mask) | 1780 | SYSCALL_DEFINE1(umask, int, mask) |
1781 | { | 1781 | { |
1782 | mask = xchg(¤t->fs->umask, mask & S_IRWXUGO); | 1782 | mask = xchg(¤t->fs->umask, mask & S_IRWXUGO); |
1783 | return mask; | 1783 | return mask; |
1784 | } | 1784 | } |
1785 | 1785 | ||
1786 | #ifdef CONFIG_CHECKPOINT_RESTORE | 1786 | #ifdef CONFIG_CHECKPOINT_RESTORE |
1787 | static int prctl_set_mm(int opt, unsigned long addr, | 1787 | static int prctl_set_mm(int opt, unsigned long addr, |
1788 | unsigned long arg4, unsigned long arg5) | 1788 | unsigned long arg4, unsigned long arg5) |
1789 | { | 1789 | { |
1790 | unsigned long rlim = rlimit(RLIMIT_DATA); | 1790 | unsigned long rlim = rlimit(RLIMIT_DATA); |
1791 | unsigned long vm_req_flags; | 1791 | unsigned long vm_req_flags; |
1792 | unsigned long vm_bad_flags; | 1792 | unsigned long vm_bad_flags; |
1793 | struct vm_area_struct *vma; | 1793 | struct vm_area_struct *vma; |
1794 | int error = 0; | 1794 | int error = 0; |
1795 | struct mm_struct *mm = current->mm; | 1795 | struct mm_struct *mm = current->mm; |
1796 | 1796 | ||
1797 | if (arg4 | arg5) | 1797 | if (arg4 | arg5) |
1798 | return -EINVAL; | 1798 | return -EINVAL; |
1799 | 1799 | ||
1800 | if (!capable(CAP_SYS_RESOURCE)) | 1800 | if (!capable(CAP_SYS_RESOURCE)) |
1801 | return -EPERM; | 1801 | return -EPERM; |
1802 | 1802 | ||
1803 | if (addr >= TASK_SIZE) | 1803 | if (addr >= TASK_SIZE) |
1804 | return -EINVAL; | 1804 | return -EINVAL; |
1805 | 1805 | ||
1806 | down_read(&mm->mmap_sem); | 1806 | down_read(&mm->mmap_sem); |
1807 | vma = find_vma(mm, addr); | 1807 | vma = find_vma(mm, addr); |
1808 | 1808 | ||
1809 | if (opt != PR_SET_MM_START_BRK && opt != PR_SET_MM_BRK) { | 1809 | if (opt != PR_SET_MM_START_BRK && opt != PR_SET_MM_BRK) { |
1810 | /* It must be existing VMA */ | 1810 | /* It must be existing VMA */ |
1811 | if (!vma || vma->vm_start > addr) | 1811 | if (!vma || vma->vm_start > addr) |
1812 | goto out; | 1812 | goto out; |
1813 | } | 1813 | } |
1814 | 1814 | ||
1815 | error = -EINVAL; | 1815 | error = -EINVAL; |
1816 | switch (opt) { | 1816 | switch (opt) { |
1817 | case PR_SET_MM_START_CODE: | 1817 | case PR_SET_MM_START_CODE: |
1818 | case PR_SET_MM_END_CODE: | 1818 | case PR_SET_MM_END_CODE: |
1819 | vm_req_flags = VM_READ | VM_EXEC; | 1819 | vm_req_flags = VM_READ | VM_EXEC; |
1820 | vm_bad_flags = VM_WRITE | VM_MAYSHARE; | 1820 | vm_bad_flags = VM_WRITE | VM_MAYSHARE; |
1821 | 1821 | ||
1822 | if ((vma->vm_flags & vm_req_flags) != vm_req_flags || | 1822 | if ((vma->vm_flags & vm_req_flags) != vm_req_flags || |
1823 | (vma->vm_flags & vm_bad_flags)) | 1823 | (vma->vm_flags & vm_bad_flags)) |
1824 | goto out; | 1824 | goto out; |
1825 | 1825 | ||
1826 | if (opt == PR_SET_MM_START_CODE) | 1826 | if (opt == PR_SET_MM_START_CODE) |
1827 | mm->start_code = addr; | 1827 | mm->start_code = addr; |
1828 | else | 1828 | else |
1829 | mm->end_code = addr; | 1829 | mm->end_code = addr; |
1830 | break; | 1830 | break; |
1831 | 1831 | ||
1832 | case PR_SET_MM_START_DATA: | 1832 | case PR_SET_MM_START_DATA: |
1833 | case PR_SET_MM_END_DATA: | 1833 | case PR_SET_MM_END_DATA: |
1834 | vm_req_flags = VM_READ | VM_WRITE; | 1834 | vm_req_flags = VM_READ | VM_WRITE; |
1835 | vm_bad_flags = VM_EXEC | VM_MAYSHARE; | 1835 | vm_bad_flags = VM_EXEC | VM_MAYSHARE; |
1836 | 1836 | ||
1837 | if ((vma->vm_flags & vm_req_flags) != vm_req_flags || | 1837 | if ((vma->vm_flags & vm_req_flags) != vm_req_flags || |
1838 | (vma->vm_flags & vm_bad_flags)) | 1838 | (vma->vm_flags & vm_bad_flags)) |
1839 | goto out; | 1839 | goto out; |
1840 | 1840 | ||
1841 | if (opt == PR_SET_MM_START_DATA) | 1841 | if (opt == PR_SET_MM_START_DATA) |
1842 | mm->start_data = addr; | 1842 | mm->start_data = addr; |
1843 | else | 1843 | else |
1844 | mm->end_data = addr; | 1844 | mm->end_data = addr; |
1845 | break; | 1845 | break; |
1846 | 1846 | ||
1847 | case PR_SET_MM_START_STACK: | 1847 | case PR_SET_MM_START_STACK: |
1848 | 1848 | ||
1849 | #ifdef CONFIG_STACK_GROWSUP | 1849 | #ifdef CONFIG_STACK_GROWSUP |
1850 | vm_req_flags = VM_READ | VM_WRITE | VM_GROWSUP; | 1850 | vm_req_flags = VM_READ | VM_WRITE | VM_GROWSUP; |
1851 | #else | 1851 | #else |
1852 | vm_req_flags = VM_READ | VM_WRITE | VM_GROWSDOWN; | 1852 | vm_req_flags = VM_READ | VM_WRITE | VM_GROWSDOWN; |
1853 | #endif | 1853 | #endif |
1854 | if ((vma->vm_flags & vm_req_flags) != vm_req_flags) | 1854 | if ((vma->vm_flags & vm_req_flags) != vm_req_flags) |
1855 | goto out; | 1855 | goto out; |
1856 | 1856 | ||
1857 | mm->start_stack = addr; | 1857 | mm->start_stack = addr; |
1858 | break; | 1858 | break; |
1859 | 1859 | ||
1860 | case PR_SET_MM_START_BRK: | 1860 | case PR_SET_MM_START_BRK: |
1861 | if (addr <= mm->end_data) | 1861 | if (addr <= mm->end_data) |
1862 | goto out; | 1862 | goto out; |
1863 | 1863 | ||
1864 | if (rlim < RLIM_INFINITY && | 1864 | if (rlim < RLIM_INFINITY && |
1865 | (mm->brk - addr) + | 1865 | (mm->brk - addr) + |
1866 | (mm->end_data - mm->start_data) > rlim) | 1866 | (mm->end_data - mm->start_data) > rlim) |
1867 | goto out; | 1867 | goto out; |
1868 | 1868 | ||
1869 | mm->start_brk = addr; | 1869 | mm->start_brk = addr; |
1870 | break; | 1870 | break; |
1871 | 1871 | ||
1872 | case PR_SET_MM_BRK: | 1872 | case PR_SET_MM_BRK: |
1873 | if (addr <= mm->end_data) | 1873 | if (addr <= mm->end_data) |
1874 | goto out; | 1874 | goto out; |
1875 | 1875 | ||
1876 | if (rlim < RLIM_INFINITY && | 1876 | if (rlim < RLIM_INFINITY && |
1877 | (addr - mm->start_brk) + | 1877 | (addr - mm->start_brk) + |
1878 | (mm->end_data - mm->start_data) > rlim) | 1878 | (mm->end_data - mm->start_data) > rlim) |
1879 | goto out; | 1879 | goto out; |
1880 | 1880 | ||
1881 | mm->brk = addr; | 1881 | mm->brk = addr; |
1882 | break; | 1882 | break; |
1883 | 1883 | ||
1884 | default: | 1884 | default: |
1885 | error = -EINVAL; | 1885 | error = -EINVAL; |
1886 | goto out; | 1886 | goto out; |
1887 | } | 1887 | } |
1888 | 1888 | ||
1889 | error = 0; | 1889 | error = 0; |
1890 | 1890 | ||
1891 | out: | 1891 | out: |
1892 | up_read(&mm->mmap_sem); | 1892 | up_read(&mm->mmap_sem); |
1893 | 1893 | ||
1894 | return error; | 1894 | return error; |
1895 | } | 1895 | } |
1896 | #else /* CONFIG_CHECKPOINT_RESTORE */ | 1896 | #else /* CONFIG_CHECKPOINT_RESTORE */ |
/* PR_SET_MM is only available with CONFIG_CHECKPOINT_RESTORE. */
static int prctl_set_mm(int opt, unsigned long addr,
			unsigned long arg4, unsigned long arg5)
{
	return -EINVAL;
}
1902 | #endif | 1902 | #endif |
1903 | 1903 | ||
1904 | SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, | 1904 | SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, |
1905 | unsigned long, arg4, unsigned long, arg5) | 1905 | unsigned long, arg4, unsigned long, arg5) |
1906 | { | 1906 | { |
1907 | struct task_struct *me = current; | 1907 | struct task_struct *me = current; |
1908 | unsigned char comm[sizeof(me->comm)]; | 1908 | unsigned char comm[sizeof(me->comm)]; |
1909 | long error; | 1909 | long error; |
1910 | 1910 | ||
1911 | error = security_task_prctl(option, arg2, arg3, arg4, arg5); | 1911 | error = security_task_prctl(option, arg2, arg3, arg4, arg5); |
1912 | if (error != -ENOSYS) | 1912 | if (error != -ENOSYS) |
1913 | return error; | 1913 | return error; |
1914 | 1914 | ||
1915 | error = 0; | 1915 | error = 0; |
1916 | switch (option) { | 1916 | switch (option) { |
1917 | case PR_SET_PDEATHSIG: | 1917 | case PR_SET_PDEATHSIG: |
1918 | if (!valid_signal(arg2)) { | 1918 | if (!valid_signal(arg2)) { |
1919 | error = -EINVAL; | 1919 | error = -EINVAL; |
1920 | break; | 1920 | break; |
1921 | } | 1921 | } |
1922 | me->pdeath_signal = arg2; | 1922 | me->pdeath_signal = arg2; |
1923 | error = 0; | 1923 | error = 0; |
1924 | break; | 1924 | break; |
1925 | case PR_GET_PDEATHSIG: | 1925 | case PR_GET_PDEATHSIG: |
1926 | error = put_user(me->pdeath_signal, (int __user *)arg2); | 1926 | error = put_user(me->pdeath_signal, (int __user *)arg2); |
1927 | break; | 1927 | break; |
1928 | case PR_GET_DUMPABLE: | 1928 | case PR_GET_DUMPABLE: |
1929 | error = get_dumpable(me->mm); | 1929 | error = get_dumpable(me->mm); |
1930 | break; | 1930 | break; |
1931 | case PR_SET_DUMPABLE: | 1931 | case PR_SET_DUMPABLE: |
1932 | if (arg2 < 0 || arg2 > 1) { | 1932 | if (arg2 < 0 || arg2 > 1) { |
1933 | error = -EINVAL; | 1933 | error = -EINVAL; |
1934 | break; | 1934 | break; |
1935 | } | 1935 | } |
1936 | set_dumpable(me->mm, arg2); | 1936 | set_dumpable(me->mm, arg2); |
1937 | error = 0; | 1937 | error = 0; |
1938 | break; | 1938 | break; |
1939 | 1939 | ||
1940 | case PR_SET_UNALIGN: | 1940 | case PR_SET_UNALIGN: |
1941 | error = SET_UNALIGN_CTL(me, arg2); | 1941 | error = SET_UNALIGN_CTL(me, arg2); |
1942 | break; | 1942 | break; |
1943 | case PR_GET_UNALIGN: | 1943 | case PR_GET_UNALIGN: |
1944 | error = GET_UNALIGN_CTL(me, arg2); | 1944 | error = GET_UNALIGN_CTL(me, arg2); |
1945 | break; | 1945 | break; |
1946 | case PR_SET_FPEMU: | 1946 | case PR_SET_FPEMU: |
1947 | error = SET_FPEMU_CTL(me, arg2); | 1947 | error = SET_FPEMU_CTL(me, arg2); |
1948 | break; | 1948 | break; |
1949 | case PR_GET_FPEMU: | 1949 | case PR_GET_FPEMU: |
1950 | error = GET_FPEMU_CTL(me, arg2); | 1950 | error = GET_FPEMU_CTL(me, arg2); |
1951 | break; | 1951 | break; |
1952 | case PR_SET_FPEXC: | 1952 | case PR_SET_FPEXC: |
1953 | error = SET_FPEXC_CTL(me, arg2); | 1953 | error = SET_FPEXC_CTL(me, arg2); |
1954 | break; | 1954 | break; |
1955 | case PR_GET_FPEXC: | 1955 | case PR_GET_FPEXC: |
1956 | error = GET_FPEXC_CTL(me, arg2); | 1956 | error = GET_FPEXC_CTL(me, arg2); |
1957 | break; | 1957 | break; |
1958 | case PR_GET_TIMING: | 1958 | case PR_GET_TIMING: |
1959 | error = PR_TIMING_STATISTICAL; | 1959 | error = PR_TIMING_STATISTICAL; |
1960 | break; | 1960 | break; |
1961 | case PR_SET_TIMING: | 1961 | case PR_SET_TIMING: |
1962 | if (arg2 != PR_TIMING_STATISTICAL) | 1962 | if (arg2 != PR_TIMING_STATISTICAL) |
1963 | error = -EINVAL; | 1963 | error = -EINVAL; |
1964 | else | 1964 | else |
1965 | error = 0; | 1965 | error = 0; |
1966 | break; | 1966 | break; |
1967 | 1967 | ||
1968 | case PR_SET_NAME: | 1968 | case PR_SET_NAME: |
1969 | comm[sizeof(me->comm)-1] = 0; | 1969 | comm[sizeof(me->comm)-1] = 0; |
1970 | if (strncpy_from_user(comm, (char __user *)arg2, | 1970 | if (strncpy_from_user(comm, (char __user *)arg2, |
1971 | sizeof(me->comm) - 1) < 0) | 1971 | sizeof(me->comm) - 1) < 0) |
1972 | return -EFAULT; | 1972 | return -EFAULT; |
1973 | set_task_comm(me, comm); | 1973 | set_task_comm(me, comm); |
1974 | proc_comm_connector(me); | 1974 | proc_comm_connector(me); |
1975 | return 0; | 1975 | return 0; |
1976 | case PR_GET_NAME: | 1976 | case PR_GET_NAME: |
1977 | get_task_comm(comm, me); | 1977 | get_task_comm(comm, me); |
1978 | if (copy_to_user((char __user *)arg2, comm, | 1978 | if (copy_to_user((char __user *)arg2, comm, |
1979 | sizeof(comm))) | 1979 | sizeof(comm))) |
1980 | return -EFAULT; | 1980 | return -EFAULT; |
1981 | return 0; | 1981 | return 0; |
1982 | case PR_GET_ENDIAN: | 1982 | case PR_GET_ENDIAN: |
1983 | error = GET_ENDIAN(me, arg2); | 1983 | error = GET_ENDIAN(me, arg2); |
1984 | break; | 1984 | break; |
1985 | case PR_SET_ENDIAN: | 1985 | case PR_SET_ENDIAN: |
1986 | error = SET_ENDIAN(me, arg2); | 1986 | error = SET_ENDIAN(me, arg2); |
1987 | break; | 1987 | break; |
1988 | 1988 | ||
1989 | case PR_GET_SECCOMP: | 1989 | case PR_GET_SECCOMP: |
1990 | error = prctl_get_seccomp(); | 1990 | error = prctl_get_seccomp(); |
1991 | break; | 1991 | break; |
1992 | case PR_SET_SECCOMP: | 1992 | case PR_SET_SECCOMP: |
1993 | error = prctl_set_seccomp(arg2, (char __user *)arg3); | 1993 | error = prctl_set_seccomp(arg2, (char __user *)arg3); |
1994 | break; | 1994 | break; |
1995 | case PR_GET_TSC: | 1995 | case PR_GET_TSC: |
1996 | error = GET_TSC_CTL(arg2); | 1996 | error = GET_TSC_CTL(arg2); |
1997 | break; | 1997 | break; |
1998 | case PR_SET_TSC: | 1998 | case PR_SET_TSC: |
1999 | error = SET_TSC_CTL(arg2); | 1999 | error = SET_TSC_CTL(arg2); |
2000 | break; | 2000 | break; |
2001 | case PR_TASK_PERF_EVENTS_DISABLE: | 2001 | case PR_TASK_PERF_EVENTS_DISABLE: |
2002 | error = perf_event_task_disable(); | 2002 | error = perf_event_task_disable(); |
2003 | break; | 2003 | break; |
2004 | case PR_TASK_PERF_EVENTS_ENABLE: | 2004 | case PR_TASK_PERF_EVENTS_ENABLE: |
2005 | error = perf_event_task_enable(); | 2005 | error = perf_event_task_enable(); |
2006 | break; | 2006 | break; |
2007 | case PR_GET_TIMERSLACK: | 2007 | case PR_GET_TIMERSLACK: |
2008 | error = current->timer_slack_ns; | 2008 | error = current->timer_slack_ns; |
2009 | break; | 2009 | break; |
2010 | case PR_SET_TIMERSLACK: | 2010 | case PR_SET_TIMERSLACK: |
2011 | if (arg2 <= 0) | 2011 | if (arg2 <= 0) |
2012 | current->timer_slack_ns = | 2012 | current->timer_slack_ns = |
2013 | current->default_timer_slack_ns; | 2013 | current->default_timer_slack_ns; |
2014 | else | 2014 | else |
2015 | current->timer_slack_ns = arg2; | 2015 | current->timer_slack_ns = arg2; |
2016 | error = 0; | 2016 | error = 0; |
2017 | break; | 2017 | break; |
2018 | case PR_MCE_KILL: | 2018 | case PR_MCE_KILL: |
2019 | if (arg4 | arg5) | 2019 | if (arg4 | arg5) |
2020 | return -EINVAL; | 2020 | return -EINVAL; |
2021 | switch (arg2) { | 2021 | switch (arg2) { |
2022 | case PR_MCE_KILL_CLEAR: | 2022 | case PR_MCE_KILL_CLEAR: |
2023 | if (arg3 != 0) | 2023 | if (arg3 != 0) |
2024 | return -EINVAL; | 2024 | return -EINVAL; |
2025 | current->flags &= ~PF_MCE_PROCESS; | 2025 | current->flags &= ~PF_MCE_PROCESS; |
2026 | break; | 2026 | break; |
2027 | case PR_MCE_KILL_SET: | 2027 | case PR_MCE_KILL_SET: |
2028 | current->flags |= PF_MCE_PROCESS; | 2028 | current->flags |= PF_MCE_PROCESS; |
2029 | if (arg3 == PR_MCE_KILL_EARLY) | 2029 | if (arg3 == PR_MCE_KILL_EARLY) |
2030 | current->flags |= PF_MCE_EARLY; | 2030 | current->flags |= PF_MCE_EARLY; |
2031 | else if (arg3 == PR_MCE_KILL_LATE) | 2031 | else if (arg3 == PR_MCE_KILL_LATE) |
2032 | current->flags &= ~PF_MCE_EARLY; | 2032 | current->flags &= ~PF_MCE_EARLY; |
2033 | else if (arg3 == PR_MCE_KILL_DEFAULT) | 2033 | else if (arg3 == PR_MCE_KILL_DEFAULT) |
2034 | current->flags &= | 2034 | current->flags &= |
2035 | ~(PF_MCE_EARLY|PF_MCE_PROCESS); | 2035 | ~(PF_MCE_EARLY|PF_MCE_PROCESS); |
2036 | else | 2036 | else |
2037 | return -EINVAL; | 2037 | return -EINVAL; |
2038 | break; | 2038 | break; |
2039 | default: | 2039 | default: |
2040 | return -EINVAL; | 2040 | return -EINVAL; |
2041 | } | 2041 | } |
2042 | error = 0; | 2042 | error = 0; |
2043 | break; | 2043 | break; |
2044 | case PR_MCE_KILL_GET: | 2044 | case PR_MCE_KILL_GET: |
2045 | if (arg2 | arg3 | arg4 | arg5) | 2045 | if (arg2 | arg3 | arg4 | arg5) |
2046 | return -EINVAL; | 2046 | return -EINVAL; |
2047 | if (current->flags & PF_MCE_PROCESS) | 2047 | if (current->flags & PF_MCE_PROCESS) |
2048 | error = (current->flags & PF_MCE_EARLY) ? | 2048 | error = (current->flags & PF_MCE_EARLY) ? |
2049 | PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE; | 2049 | PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE; |
2050 | else | 2050 | else |
2051 | error = PR_MCE_KILL_DEFAULT; | 2051 | error = PR_MCE_KILL_DEFAULT; |
2052 | break; | 2052 | break; |
2053 | case PR_SET_MM: | 2053 | case PR_SET_MM: |
2054 | error = prctl_set_mm(arg2, arg3, arg4, arg5); | 2054 | error = prctl_set_mm(arg2, arg3, arg4, arg5); |
2055 | break; | 2055 | break; |
2056 | case PR_SET_CHILD_SUBREAPER: | 2056 | case PR_SET_CHILD_SUBREAPER: |
2057 | me->signal->is_child_subreaper = !!arg2; | 2057 | me->signal->is_child_subreaper = !!arg2; |
2058 | error = 0; | 2058 | error = 0; |
2059 | break; | 2059 | break; |
2060 | case PR_GET_CHILD_SUBREAPER: | 2060 | case PR_GET_CHILD_SUBREAPER: |
2061 | error = put_user(me->signal->is_child_subreaper, | 2061 | error = put_user(me->signal->is_child_subreaper, |
2062 | (int __user *) arg2); | 2062 | (int __user *) arg2); |
2063 | break; | 2063 | break; |
2064 | case PR_SET_NO_NEW_PRIVS: | 2064 | case PR_SET_NO_NEW_PRIVS: |
2065 | if (arg2 != 1 || arg3 || arg4 || arg5) | 2065 | if (arg2 != 1 || arg3 || arg4 || arg5) |
2066 | return -EINVAL; | 2066 | return -EINVAL; |
2067 | 2067 | ||
2068 | current->no_new_privs = 1; | 2068 | current->no_new_privs = 1; |
2069 | break; | 2069 | break; |
2070 | case PR_GET_NO_NEW_PRIVS: | 2070 | case PR_GET_NO_NEW_PRIVS: |
2071 | if (arg2 || arg3 || arg4 || arg5) | 2071 | if (arg2 || arg3 || arg4 || arg5) |
2072 | return -EINVAL; | 2072 | return -EINVAL; |
2073 | return current->no_new_privs ? 1 : 0; | 2073 | return current->no_new_privs ? 1 : 0; |
2074 | default: | 2074 | default: |
2075 | error = -EINVAL; | 2075 | error = -EINVAL; |
2076 | break; | 2076 | break; |
2077 | } | 2077 | } |
2078 | return error; | 2078 | return error; |
2079 | } | 2079 | } |
2080 | 2080 | ||
2081 | SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep, | 2081 | SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep, |
2082 | struct getcpu_cache __user *, unused) | 2082 | struct getcpu_cache __user *, unused) |
2083 | { | 2083 | { |
2084 | int err = 0; | 2084 | int err = 0; |
2085 | int cpu = raw_smp_processor_id(); | 2085 | int cpu = raw_smp_processor_id(); |
2086 | if (cpup) | 2086 | if (cpup) |
2087 | err |= put_user(cpu, cpup); | 2087 | err |= put_user(cpu, cpup); |
2088 | if (nodep) | 2088 | if (nodep) |
2089 | err |= put_user(cpu_to_node(cpu), nodep); | 2089 | err |= put_user(cpu_to_node(cpu), nodep); |
2090 | return err ? -EFAULT : 0; | 2090 | return err ? -EFAULT : 0; |
2091 | } | 2091 | } |
2092 | 2092 | ||
2093 | char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff"; | 2093 | char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff"; |
2094 | 2094 | ||
2095 | static void argv_cleanup(struct subprocess_info *info) | 2095 | static void argv_cleanup(struct subprocess_info *info) |
2096 | { | 2096 | { |
2097 | argv_free(info->argv); | 2097 | argv_free(info->argv); |
2098 | } | 2098 | } |
2099 | 2099 | ||
2100 | /** | 2100 | /** |
2101 | * orderly_poweroff - Trigger an orderly system poweroff | 2101 | * orderly_poweroff - Trigger an orderly system poweroff |
2102 | * @force: force poweroff if command execution fails | 2102 | * @force: force poweroff if command execution fails |
2103 | * | 2103 | * |
2104 | * This may be called from any context to trigger a system shutdown. | 2104 | * This may be called from any context to trigger a system shutdown. |
2105 | * If the orderly shutdown fails, it will force an immediate shutdown. | 2105 | * If the orderly shutdown fails, it will force an immediate shutdown. |
2106 | */ | 2106 | */ |
2107 | int orderly_poweroff(bool force) | 2107 | int orderly_poweroff(bool force) |
2108 | { | 2108 | { |
2109 | int argc; | 2109 | int argc; |
2110 | char **argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc); | 2110 | char **argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc); |
2111 | static char *envp[] = { | 2111 | static char *envp[] = { |
2112 | "HOME=/", | 2112 | "HOME=/", |
2113 | "PATH=/sbin:/bin:/usr/sbin:/usr/bin", | 2113 | "PATH=/sbin:/bin:/usr/sbin:/usr/bin", |
2114 | NULL | 2114 | NULL |
2115 | }; | 2115 | }; |
2116 | int ret = -ENOMEM; | 2116 | int ret = -ENOMEM; |
2117 | struct subprocess_info *info; | ||
2118 | 2117 | ||
2119 | if (argv == NULL) { | 2118 | if (argv == NULL) { |
2120 | printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n", | 2119 | printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n", |
2121 | __func__, poweroff_cmd); | 2120 | __func__, poweroff_cmd); |
2122 | goto out; | 2121 | goto out; |
2123 | } | 2122 | } |
2124 | 2123 | ||
2125 | info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC); | 2124 | ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_NO_WAIT, |
2126 | if (info == NULL) { | 2125 | NULL, argv_cleanup, NULL); |
2126 | out: | ||
2127 | if (likely(!ret)) | ||
2128 | return 0; | ||
2129 | |||
2130 | if (ret == -ENOMEM) | ||
2127 | argv_free(argv); | 2131 | argv_free(argv); |
2128 | goto out; | ||
2129 | } | ||
2130 | 2132 | ||
2131 | call_usermodehelper_setfns(info, NULL, argv_cleanup, NULL); | 2133 | if (force) { |
2132 | |||
2133 | ret = call_usermodehelper_exec(info, UMH_NO_WAIT); | ||
2134 | |||
2135 | out: | ||
2136 | if (ret && force) { | ||
2137 | printk(KERN_WARNING "Failed to start orderly shutdown: " | 2134 | printk(KERN_WARNING "Failed to start orderly shutdown: " |
2138 | "forcing the issue\n"); | 2135 | "forcing the issue\n"); |
2139 | 2136 | ||
2140 | /* I guess this should try to kick off some daemon to | 2137 | /* I guess this should try to kick off some daemon to |
2141 | sync and poweroff asap. Or not even bother syncing | 2138 | sync and poweroff asap. Or not even bother syncing |
2142 | if we're doing an emergency shutdown? */ | 2139 | if we're doing an emergency shutdown? */ |
2143 | emergency_sync(); | 2140 | emergency_sync(); |
2144 | kernel_power_off(); | 2141 | kernel_power_off(); |
2145 | } | 2142 | } |
security/keys/request_key.c
1 | /* Request a key from userspace | 1 | /* Request a key from userspace |
2 | * | 2 | * |
3 | * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License | 7 | * modify it under the terms of the GNU General Public License |
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | * | 10 | * |
11 | * See Documentation/security/keys-request-key.txt | 11 | * See Documentation/security/keys-request-key.txt |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <linux/kmod.h> | 16 | #include <linux/kmod.h> |
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | #include <linux/keyctl.h> | 18 | #include <linux/keyctl.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include "internal.h" | 20 | #include "internal.h" |
21 | 21 | ||
22 | #define key_negative_timeout 60 /* default timeout on a negative key's existence */ | 22 | #define key_negative_timeout 60 /* default timeout on a negative key's existence */ |
23 | 23 | ||
24 | /* | 24 | /* |
25 | * wait_on_bit() sleep function for uninterruptible waiting | 25 | * wait_on_bit() sleep function for uninterruptible waiting |
26 | */ | 26 | */ |
27 | static int key_wait_bit(void *flags) | 27 | static int key_wait_bit(void *flags) |
28 | { | 28 | { |
29 | schedule(); | 29 | schedule(); |
30 | return 0; | 30 | return 0; |
31 | } | 31 | } |
32 | 32 | ||
33 | /* | 33 | /* |
34 | * wait_on_bit() sleep function for interruptible waiting | 34 | * wait_on_bit() sleep function for interruptible waiting |
35 | */ | 35 | */ |
36 | static int key_wait_bit_intr(void *flags) | 36 | static int key_wait_bit_intr(void *flags) |
37 | { | 37 | { |
38 | schedule(); | 38 | schedule(); |
39 | return signal_pending(current) ? -ERESTARTSYS : 0; | 39 | return signal_pending(current) ? -ERESTARTSYS : 0; |
40 | } | 40 | } |
41 | 41 | ||
42 | /** | 42 | /** |
43 | * complete_request_key - Complete the construction of a key. | 43 | * complete_request_key - Complete the construction of a key. |
44 | * @cons: The key construction record. | 44 | * @cons: The key construction record. |
45 | * @error: The success or failute of the construction. | 45 | * @error: The success or failute of the construction. |
46 | * | 46 | * |
47 | * Complete the attempt to construct a key. The key will be negated | 47 | * Complete the attempt to construct a key. The key will be negated |
48 | * if an error is indicated. The authorisation key will be revoked | 48 | * if an error is indicated. The authorisation key will be revoked |
49 | * unconditionally. | 49 | * unconditionally. |
50 | */ | 50 | */ |
51 | void complete_request_key(struct key_construction *cons, int error) | 51 | void complete_request_key(struct key_construction *cons, int error) |
52 | { | 52 | { |
53 | kenter("{%d,%d},%d", cons->key->serial, cons->authkey->serial, error); | 53 | kenter("{%d,%d},%d", cons->key->serial, cons->authkey->serial, error); |
54 | 54 | ||
55 | if (error < 0) | 55 | if (error < 0) |
56 | key_negate_and_link(cons->key, key_negative_timeout, NULL, | 56 | key_negate_and_link(cons->key, key_negative_timeout, NULL, |
57 | cons->authkey); | 57 | cons->authkey); |
58 | else | 58 | else |
59 | key_revoke(cons->authkey); | 59 | key_revoke(cons->authkey); |
60 | 60 | ||
61 | key_put(cons->key); | 61 | key_put(cons->key); |
62 | key_put(cons->authkey); | 62 | key_put(cons->authkey); |
63 | kfree(cons); | 63 | kfree(cons); |
64 | } | 64 | } |
65 | EXPORT_SYMBOL(complete_request_key); | 65 | EXPORT_SYMBOL(complete_request_key); |
66 | 66 | ||
67 | /* | 67 | /* |
68 | * Initialise a usermode helper that is going to have a specific session | 68 | * Initialise a usermode helper that is going to have a specific session |
69 | * keyring. | 69 | * keyring. |
70 | * | 70 | * |
71 | * This is called in context of freshly forked kthread before kernel_execve(), | 71 | * This is called in context of freshly forked kthread before kernel_execve(), |
72 | * so we can simply install the desired session_keyring at this point. | 72 | * so we can simply install the desired session_keyring at this point. |
73 | */ | 73 | */ |
74 | static int umh_keys_init(struct subprocess_info *info, struct cred *cred) | 74 | static int umh_keys_init(struct subprocess_info *info, struct cred *cred) |
75 | { | 75 | { |
76 | struct key *keyring = info->data; | 76 | struct key *keyring = info->data; |
77 | 77 | ||
78 | return install_session_keyring_to_cred(cred, keyring); | 78 | return install_session_keyring_to_cred(cred, keyring); |
79 | } | 79 | } |
80 | 80 | ||
81 | /* | 81 | /* |
82 | * Clean up a usermode helper with session keyring. | 82 | * Clean up a usermode helper with session keyring. |
83 | */ | 83 | */ |
84 | static void umh_keys_cleanup(struct subprocess_info *info) | 84 | static void umh_keys_cleanup(struct subprocess_info *info) |
85 | { | 85 | { |
86 | struct key *keyring = info->data; | 86 | struct key *keyring = info->data; |
87 | key_put(keyring); | 87 | key_put(keyring); |
88 | } | 88 | } |
89 | 89 | ||
90 | /* | 90 | /* |
91 | * Call a usermode helper with a specific session keyring. | 91 | * Call a usermode helper with a specific session keyring. |
92 | */ | 92 | */ |
93 | static int call_usermodehelper_keys(char *path, char **argv, char **envp, | 93 | static int call_usermodehelper_keys(char *path, char **argv, char **envp, |
94 | struct key *session_keyring, int wait) | 94 | struct key *session_keyring, int wait) |
95 | { | 95 | { |
96 | gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; | 96 | return call_usermodehelper_fns(path, argv, envp, wait, |
97 | struct subprocess_info *info = | 97 | umh_keys_init, umh_keys_cleanup, |
98 | call_usermodehelper_setup(path, argv, envp, gfp_mask); | 98 | key_get(session_keyring)); |
99 | |||
100 | if (!info) | ||
101 | return -ENOMEM; | ||
102 | |||
103 | call_usermodehelper_setfns(info, umh_keys_init, umh_keys_cleanup, | ||
104 | key_get(session_keyring)); | ||
105 | return call_usermodehelper_exec(info, wait); | ||
106 | } | 99 | } |
107 | 100 | ||
108 | /* | 101 | /* |
109 | * Request userspace finish the construction of a key | 102 | * Request userspace finish the construction of a key |
110 | * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>" | 103 | * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>" |
111 | */ | 104 | */ |
112 | static int call_sbin_request_key(struct key_construction *cons, | 105 | static int call_sbin_request_key(struct key_construction *cons, |
113 | const char *op, | 106 | const char *op, |
114 | void *aux) | 107 | void *aux) |
115 | { | 108 | { |
116 | const struct cred *cred = current_cred(); | 109 | const struct cred *cred = current_cred(); |
117 | key_serial_t prkey, sskey; | 110 | key_serial_t prkey, sskey; |
118 | struct key *key = cons->key, *authkey = cons->authkey, *keyring, | 111 | struct key *key = cons->key, *authkey = cons->authkey, *keyring, |
119 | *session; | 112 | *session; |
120 | char *argv[9], *envp[3], uid_str[12], gid_str[12]; | 113 | char *argv[9], *envp[3], uid_str[12], gid_str[12]; |
121 | char key_str[12], keyring_str[3][12]; | 114 | char key_str[12], keyring_str[3][12]; |
122 | char desc[20]; | 115 | char desc[20]; |
123 | int ret, i; | 116 | int ret, i; |
124 | 117 | ||
125 | kenter("{%d},{%d},%s", key->serial, authkey->serial, op); | 118 | kenter("{%d},{%d},%s", key->serial, authkey->serial, op); |
126 | 119 | ||
127 | ret = install_user_keyrings(); | 120 | ret = install_user_keyrings(); |
128 | if (ret < 0) | 121 | if (ret < 0) |
129 | goto error_alloc; | 122 | goto error_alloc; |
130 | 123 | ||
131 | /* allocate a new session keyring */ | 124 | /* allocate a new session keyring */ |
132 | sprintf(desc, "_req.%u", key->serial); | 125 | sprintf(desc, "_req.%u", key->serial); |
133 | 126 | ||
134 | cred = get_current_cred(); | 127 | cred = get_current_cred(); |
135 | keyring = keyring_alloc(desc, cred->fsuid, cred->fsgid, cred, | 128 | keyring = keyring_alloc(desc, cred->fsuid, cred->fsgid, cred, |
136 | KEY_ALLOC_QUOTA_OVERRUN, NULL); | 129 | KEY_ALLOC_QUOTA_OVERRUN, NULL); |
137 | put_cred(cred); | 130 | put_cred(cred); |
138 | if (IS_ERR(keyring)) { | 131 | if (IS_ERR(keyring)) { |
139 | ret = PTR_ERR(keyring); | 132 | ret = PTR_ERR(keyring); |
140 | goto error_alloc; | 133 | goto error_alloc; |
141 | } | 134 | } |
142 | 135 | ||
143 | /* attach the auth key to the session keyring */ | 136 | /* attach the auth key to the session keyring */ |
144 | ret = key_link(keyring, authkey); | 137 | ret = key_link(keyring, authkey); |
145 | if (ret < 0) | 138 | if (ret < 0) |
146 | goto error_link; | 139 | goto error_link; |
147 | 140 | ||
148 | /* record the UID and GID */ | 141 | /* record the UID and GID */ |
149 | sprintf(uid_str, "%d", cred->fsuid); | 142 | sprintf(uid_str, "%d", cred->fsuid); |
150 | sprintf(gid_str, "%d", cred->fsgid); | 143 | sprintf(gid_str, "%d", cred->fsgid); |
151 | 144 | ||
152 | /* we say which key is under construction */ | 145 | /* we say which key is under construction */ |
153 | sprintf(key_str, "%d", key->serial); | 146 | sprintf(key_str, "%d", key->serial); |
154 | 147 | ||
155 | /* we specify the process's default keyrings */ | 148 | /* we specify the process's default keyrings */ |
156 | sprintf(keyring_str[0], "%d", | 149 | sprintf(keyring_str[0], "%d", |
157 | cred->thread_keyring ? cred->thread_keyring->serial : 0); | 150 | cred->thread_keyring ? cred->thread_keyring->serial : 0); |
158 | 151 | ||
159 | prkey = 0; | 152 | prkey = 0; |
160 | if (cred->tgcred->process_keyring) | 153 | if (cred->tgcred->process_keyring) |
161 | prkey = cred->tgcred->process_keyring->serial; | 154 | prkey = cred->tgcred->process_keyring->serial; |
162 | sprintf(keyring_str[1], "%d", prkey); | 155 | sprintf(keyring_str[1], "%d", prkey); |
163 | 156 | ||
164 | rcu_read_lock(); | 157 | rcu_read_lock(); |
165 | session = rcu_dereference(cred->tgcred->session_keyring); | 158 | session = rcu_dereference(cred->tgcred->session_keyring); |
166 | if (!session) | 159 | if (!session) |
167 | session = cred->user->session_keyring; | 160 | session = cred->user->session_keyring; |
168 | sskey = session->serial; | 161 | sskey = session->serial; |
169 | rcu_read_unlock(); | 162 | rcu_read_unlock(); |
170 | 163 | ||
171 | sprintf(keyring_str[2], "%d", sskey); | 164 | sprintf(keyring_str[2], "%d", sskey); |
172 | 165 | ||
173 | /* set up a minimal environment */ | 166 | /* set up a minimal environment */ |
174 | i = 0; | 167 | i = 0; |
175 | envp[i++] = "HOME=/"; | 168 | envp[i++] = "HOME=/"; |
176 | envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; | 169 | envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; |
177 | envp[i] = NULL; | 170 | envp[i] = NULL; |
178 | 171 | ||
179 | /* set up the argument list */ | 172 | /* set up the argument list */ |
180 | i = 0; | 173 | i = 0; |
181 | argv[i++] = "/sbin/request-key"; | 174 | argv[i++] = "/sbin/request-key"; |
182 | argv[i++] = (char *) op; | 175 | argv[i++] = (char *) op; |
183 | argv[i++] = key_str; | 176 | argv[i++] = key_str; |
184 | argv[i++] = uid_str; | 177 | argv[i++] = uid_str; |
185 | argv[i++] = gid_str; | 178 | argv[i++] = gid_str; |
186 | argv[i++] = keyring_str[0]; | 179 | argv[i++] = keyring_str[0]; |
187 | argv[i++] = keyring_str[1]; | 180 | argv[i++] = keyring_str[1]; |
188 | argv[i++] = keyring_str[2]; | 181 | argv[i++] = keyring_str[2]; |
189 | argv[i] = NULL; | 182 | argv[i] = NULL; |
190 | 183 | ||
191 | /* do it */ | 184 | /* do it */ |
192 | ret = call_usermodehelper_keys(argv[0], argv, envp, keyring, | 185 | ret = call_usermodehelper_keys(argv[0], argv, envp, keyring, |
193 | UMH_WAIT_PROC); | 186 | UMH_WAIT_PROC); |
194 | kdebug("usermode -> 0x%x", ret); | 187 | kdebug("usermode -> 0x%x", ret); |
195 | if (ret >= 0) { | 188 | if (ret >= 0) { |
196 | /* ret is the exit/wait code */ | 189 | /* ret is the exit/wait code */ |
197 | if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags) || | 190 | if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags) || |
198 | key_validate(key) < 0) | 191 | key_validate(key) < 0) |
199 | ret = -ENOKEY; | 192 | ret = -ENOKEY; |
200 | else | 193 | else |
201 | /* ignore any errors from userspace if the key was | 194 | /* ignore any errors from userspace if the key was |
202 | * instantiated */ | 195 | * instantiated */ |
203 | ret = 0; | 196 | ret = 0; |
204 | } | 197 | } |
205 | 198 | ||
206 | error_link: | 199 | error_link: |
207 | key_put(keyring); | 200 | key_put(keyring); |
208 | 201 | ||
209 | error_alloc: | 202 | error_alloc: |
210 | complete_request_key(cons, ret); | 203 | complete_request_key(cons, ret); |
211 | kleave(" = %d", ret); | 204 | kleave(" = %d", ret); |
212 | return ret; | 205 | return ret; |
213 | } | 206 | } |
214 | 207 | ||
215 | /* | 208 | /* |
216 | * Call out to userspace for key construction. | 209 | * Call out to userspace for key construction. |
217 | * | 210 | * |
218 | * Program failure is ignored in favour of key status. | 211 | * Program failure is ignored in favour of key status. |
219 | */ | 212 | */ |
220 | static int construct_key(struct key *key, const void *callout_info, | 213 | static int construct_key(struct key *key, const void *callout_info, |
221 | size_t callout_len, void *aux, | 214 | size_t callout_len, void *aux, |
222 | struct key *dest_keyring) | 215 | struct key *dest_keyring) |
223 | { | 216 | { |
224 | struct key_construction *cons; | 217 | struct key_construction *cons; |
225 | request_key_actor_t actor; | 218 | request_key_actor_t actor; |
226 | struct key *authkey; | 219 | struct key *authkey; |
227 | int ret; | 220 | int ret; |
228 | 221 | ||
229 | kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux); | 222 | kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux); |
230 | 223 | ||
231 | cons = kmalloc(sizeof(*cons), GFP_KERNEL); | 224 | cons = kmalloc(sizeof(*cons), GFP_KERNEL); |
232 | if (!cons) | 225 | if (!cons) |
233 | return -ENOMEM; | 226 | return -ENOMEM; |
234 | 227 | ||
235 | /* allocate an authorisation key */ | 228 | /* allocate an authorisation key */ |
236 | authkey = request_key_auth_new(key, callout_info, callout_len, | 229 | authkey = request_key_auth_new(key, callout_info, callout_len, |
237 | dest_keyring); | 230 | dest_keyring); |
238 | if (IS_ERR(authkey)) { | 231 | if (IS_ERR(authkey)) { |
239 | kfree(cons); | 232 | kfree(cons); |
240 | ret = PTR_ERR(authkey); | 233 | ret = PTR_ERR(authkey); |
241 | authkey = NULL; | 234 | authkey = NULL; |
242 | } else { | 235 | } else { |
243 | cons->authkey = key_get(authkey); | 236 | cons->authkey = key_get(authkey); |
244 | cons->key = key_get(key); | 237 | cons->key = key_get(key); |
245 | 238 | ||
246 | /* make the call */ | 239 | /* make the call */ |
247 | actor = call_sbin_request_key; | 240 | actor = call_sbin_request_key; |
248 | if (key->type->request_key) | 241 | if (key->type->request_key) |
249 | actor = key->type->request_key; | 242 | actor = key->type->request_key; |
250 | 243 | ||
251 | ret = actor(cons, "create", aux); | 244 | ret = actor(cons, "create", aux); |
252 | 245 | ||
253 | /* check that the actor called complete_request_key() prior to | 246 | /* check that the actor called complete_request_key() prior to |
254 | * returning an error */ | 247 | * returning an error */ |
255 | WARN_ON(ret < 0 && | 248 | WARN_ON(ret < 0 && |
256 | !test_bit(KEY_FLAG_REVOKED, &authkey->flags)); | 249 | !test_bit(KEY_FLAG_REVOKED, &authkey->flags)); |
257 | key_put(authkey); | 250 | key_put(authkey); |
258 | } | 251 | } |
259 | 252 | ||
260 | kleave(" = %d", ret); | 253 | kleave(" = %d", ret); |
261 | return ret; | 254 | return ret; |
262 | } | 255 | } |
263 | 256 | ||
264 | /* | 257 | /* |
265 | * Get the appropriate destination keyring for the request. | 258 | * Get the appropriate destination keyring for the request. |
266 | * | 259 | * |
267 | * The keyring selected is returned with an extra reference upon it which the | 260 | * The keyring selected is returned with an extra reference upon it which the |
268 | * caller must release. | 261 | * caller must release. |
269 | */ | 262 | */ |
270 | static void construct_get_dest_keyring(struct key **_dest_keyring) | 263 | static void construct_get_dest_keyring(struct key **_dest_keyring) |
271 | { | 264 | { |
272 | struct request_key_auth *rka; | 265 | struct request_key_auth *rka; |
273 | const struct cred *cred = current_cred(); | 266 | const struct cred *cred = current_cred(); |
274 | struct key *dest_keyring = *_dest_keyring, *authkey; | 267 | struct key *dest_keyring = *_dest_keyring, *authkey; |
275 | 268 | ||
276 | kenter("%p", dest_keyring); | 269 | kenter("%p", dest_keyring); |
277 | 270 | ||
278 | /* find the appropriate keyring */ | 271 | /* find the appropriate keyring */ |
279 | if (dest_keyring) { | 272 | if (dest_keyring) { |
280 | /* the caller supplied one */ | 273 | /* the caller supplied one */ |
281 | key_get(dest_keyring); | 274 | key_get(dest_keyring); |
282 | } else { | 275 | } else { |
283 | /* use a default keyring; falling through the cases until we | 276 | /* use a default keyring; falling through the cases until we |
284 | * find one that we actually have */ | 277 | * find one that we actually have */ |
285 | switch (cred->jit_keyring) { | 278 | switch (cred->jit_keyring) { |
286 | case KEY_REQKEY_DEFL_DEFAULT: | 279 | case KEY_REQKEY_DEFL_DEFAULT: |
287 | case KEY_REQKEY_DEFL_REQUESTOR_KEYRING: | 280 | case KEY_REQKEY_DEFL_REQUESTOR_KEYRING: |
288 | if (cred->request_key_auth) { | 281 | if (cred->request_key_auth) { |
289 | authkey = cred->request_key_auth; | 282 | authkey = cred->request_key_auth; |
290 | down_read(&authkey->sem); | 283 | down_read(&authkey->sem); |
291 | rka = authkey->payload.data; | 284 | rka = authkey->payload.data; |
292 | if (!test_bit(KEY_FLAG_REVOKED, | 285 | if (!test_bit(KEY_FLAG_REVOKED, |
293 | &authkey->flags)) | 286 | &authkey->flags)) |
294 | dest_keyring = | 287 | dest_keyring = |
295 | key_get(rka->dest_keyring); | 288 | key_get(rka->dest_keyring); |
296 | up_read(&authkey->sem); | 289 | up_read(&authkey->sem); |
297 | if (dest_keyring) | 290 | if (dest_keyring) |
298 | break; | 291 | break; |
299 | } | 292 | } |
300 | 293 | ||
301 | case KEY_REQKEY_DEFL_THREAD_KEYRING: | 294 | case KEY_REQKEY_DEFL_THREAD_KEYRING: |
302 | dest_keyring = key_get(cred->thread_keyring); | 295 | dest_keyring = key_get(cred->thread_keyring); |
303 | if (dest_keyring) | 296 | if (dest_keyring) |
304 | break; | 297 | break; |
305 | 298 | ||
306 | case KEY_REQKEY_DEFL_PROCESS_KEYRING: | 299 | case KEY_REQKEY_DEFL_PROCESS_KEYRING: |
307 | dest_keyring = key_get(cred->tgcred->process_keyring); | 300 | dest_keyring = key_get(cred->tgcred->process_keyring); |
308 | if (dest_keyring) | 301 | if (dest_keyring) |
309 | break; | 302 | break; |
310 | 303 | ||
311 | case KEY_REQKEY_DEFL_SESSION_KEYRING: | 304 | case KEY_REQKEY_DEFL_SESSION_KEYRING: |
312 | rcu_read_lock(); | 305 | rcu_read_lock(); |
313 | dest_keyring = key_get( | 306 | dest_keyring = key_get( |
314 | rcu_dereference(cred->tgcred->session_keyring)); | 307 | rcu_dereference(cred->tgcred->session_keyring)); |
315 | rcu_read_unlock(); | 308 | rcu_read_unlock(); |
316 | 309 | ||
317 | if (dest_keyring) | 310 | if (dest_keyring) |
318 | break; | 311 | break; |
319 | 312 | ||
320 | case KEY_REQKEY_DEFL_USER_SESSION_KEYRING: | 313 | case KEY_REQKEY_DEFL_USER_SESSION_KEYRING: |
321 | dest_keyring = | 314 | dest_keyring = |
322 | key_get(cred->user->session_keyring); | 315 | key_get(cred->user->session_keyring); |
323 | break; | 316 | break; |
324 | 317 | ||
325 | case KEY_REQKEY_DEFL_USER_KEYRING: | 318 | case KEY_REQKEY_DEFL_USER_KEYRING: |
326 | dest_keyring = key_get(cred->user->uid_keyring); | 319 | dest_keyring = key_get(cred->user->uid_keyring); |
327 | break; | 320 | break; |
328 | 321 | ||
329 | case KEY_REQKEY_DEFL_GROUP_KEYRING: | 322 | case KEY_REQKEY_DEFL_GROUP_KEYRING: |
330 | default: | 323 | default: |
331 | BUG(); | 324 | BUG(); |
332 | } | 325 | } |
333 | } | 326 | } |
334 | 327 | ||
335 | *_dest_keyring = dest_keyring; | 328 | *_dest_keyring = dest_keyring; |
336 | kleave(" [dk %d]", key_serial(dest_keyring)); | 329 | kleave(" [dk %d]", key_serial(dest_keyring)); |
337 | return; | 330 | return; |
338 | } | 331 | } |
339 | 332 | ||
340 | /* | 333 | /* |
341 | * Allocate a new key in under-construction state and attempt to link it in to | 334 | * Allocate a new key in under-construction state and attempt to link it in to |
342 | * the requested keyring. | 335 | * the requested keyring. |
343 | * | 336 | * |
344 | * May return a key that's already under construction instead if there was a | 337 | * May return a key that's already under construction instead if there was a |
345 | * race between two thread calling request_key(). | 338 | * race between two thread calling request_key(). |
346 | */ | 339 | */ |
347 | static int construct_alloc_key(struct key_type *type, | 340 | static int construct_alloc_key(struct key_type *type, |
348 | const char *description, | 341 | const char *description, |
349 | struct key *dest_keyring, | 342 | struct key *dest_keyring, |
350 | unsigned long flags, | 343 | unsigned long flags, |
351 | struct key_user *user, | 344 | struct key_user *user, |
352 | struct key **_key) | 345 | struct key **_key) |
353 | { | 346 | { |
354 | const struct cred *cred = current_cred(); | 347 | const struct cred *cred = current_cred(); |
355 | unsigned long prealloc; | 348 | unsigned long prealloc; |
356 | struct key *key; | 349 | struct key *key; |
357 | key_ref_t key_ref; | 350 | key_ref_t key_ref; |
358 | int ret; | 351 | int ret; |
359 | 352 | ||
360 | kenter("%s,%s,,,", type->name, description); | 353 | kenter("%s,%s,,,", type->name, description); |
361 | 354 | ||
362 | *_key = NULL; | 355 | *_key = NULL; |
363 | mutex_lock(&user->cons_lock); | 356 | mutex_lock(&user->cons_lock); |
364 | 357 | ||
365 | key = key_alloc(type, description, cred->fsuid, cred->fsgid, cred, | 358 | key = key_alloc(type, description, cred->fsuid, cred->fsgid, cred, |
366 | KEY_POS_ALL, flags); | 359 | KEY_POS_ALL, flags); |
367 | if (IS_ERR(key)) | 360 | if (IS_ERR(key)) |
368 | goto alloc_failed; | 361 | goto alloc_failed; |
369 | 362 | ||
370 | set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags); | 363 | set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags); |
371 | 364 | ||
372 | if (dest_keyring) { | 365 | if (dest_keyring) { |
373 | ret = __key_link_begin(dest_keyring, type, description, | 366 | ret = __key_link_begin(dest_keyring, type, description, |
374 | &prealloc); | 367 | &prealloc); |
375 | if (ret < 0) | 368 | if (ret < 0) |
376 | goto link_prealloc_failed; | 369 | goto link_prealloc_failed; |
377 | } | 370 | } |
378 | 371 | ||
379 | /* attach the key to the destination keyring under lock, but we do need | 372 | /* attach the key to the destination keyring under lock, but we do need |
380 | * to do another check just in case someone beat us to it whilst we | 373 | * to do another check just in case someone beat us to it whilst we |
381 | * waited for locks */ | 374 | * waited for locks */ |
382 | mutex_lock(&key_construction_mutex); | 375 | mutex_lock(&key_construction_mutex); |
383 | 376 | ||
384 | key_ref = search_process_keyrings(type, description, type->match, cred); | 377 | key_ref = search_process_keyrings(type, description, type->match, cred); |
385 | if (!IS_ERR(key_ref)) | 378 | if (!IS_ERR(key_ref)) |
386 | goto key_already_present; | 379 | goto key_already_present; |
387 | 380 | ||
388 | if (dest_keyring) | 381 | if (dest_keyring) |
389 | __key_link(dest_keyring, key, &prealloc); | 382 | __key_link(dest_keyring, key, &prealloc); |
390 | 383 | ||
391 | mutex_unlock(&key_construction_mutex); | 384 | mutex_unlock(&key_construction_mutex); |
392 | if (dest_keyring) | 385 | if (dest_keyring) |
393 | __key_link_end(dest_keyring, type, prealloc); | 386 | __key_link_end(dest_keyring, type, prealloc); |
394 | mutex_unlock(&user->cons_lock); | 387 | mutex_unlock(&user->cons_lock); |
395 | *_key = key; | 388 | *_key = key; |
396 | kleave(" = 0 [%d]", key_serial(key)); | 389 | kleave(" = 0 [%d]", key_serial(key)); |
397 | return 0; | 390 | return 0; |
398 | 391 | ||
399 | /* the key is now present - we tell the caller that we found it by | 392 | /* the key is now present - we tell the caller that we found it by |
400 | * returning -EINPROGRESS */ | 393 | * returning -EINPROGRESS */ |
401 | key_already_present: | 394 | key_already_present: |
402 | key_put(key); | 395 | key_put(key); |
403 | mutex_unlock(&key_construction_mutex); | 396 | mutex_unlock(&key_construction_mutex); |
404 | key = key_ref_to_ptr(key_ref); | 397 | key = key_ref_to_ptr(key_ref); |
405 | if (dest_keyring) { | 398 | if (dest_keyring) { |
406 | ret = __key_link_check_live_key(dest_keyring, key); | 399 | ret = __key_link_check_live_key(dest_keyring, key); |
407 | if (ret == 0) | 400 | if (ret == 0) |
408 | __key_link(dest_keyring, key, &prealloc); | 401 | __key_link(dest_keyring, key, &prealloc); |
409 | __key_link_end(dest_keyring, type, prealloc); | 402 | __key_link_end(dest_keyring, type, prealloc); |
410 | if (ret < 0) | 403 | if (ret < 0) |
411 | goto link_check_failed; | 404 | goto link_check_failed; |
412 | } | 405 | } |
413 | mutex_unlock(&user->cons_lock); | 406 | mutex_unlock(&user->cons_lock); |
414 | *_key = key; | 407 | *_key = key; |
415 | kleave(" = -EINPROGRESS [%d]", key_serial(key)); | 408 | kleave(" = -EINPROGRESS [%d]", key_serial(key)); |
416 | return -EINPROGRESS; | 409 | return -EINPROGRESS; |
417 | 410 | ||
418 | link_check_failed: | 411 | link_check_failed: |
419 | mutex_unlock(&user->cons_lock); | 412 | mutex_unlock(&user->cons_lock); |
420 | key_put(key); | 413 | key_put(key); |
421 | kleave(" = %d [linkcheck]", ret); | 414 | kleave(" = %d [linkcheck]", ret); |
422 | return ret; | 415 | return ret; |
423 | 416 | ||
424 | link_prealloc_failed: | 417 | link_prealloc_failed: |
425 | mutex_unlock(&user->cons_lock); | 418 | mutex_unlock(&user->cons_lock); |
426 | kleave(" = %d [prelink]", ret); | 419 | kleave(" = %d [prelink]", ret); |
427 | return ret; | 420 | return ret; |
428 | 421 | ||
429 | alloc_failed: | 422 | alloc_failed: |
430 | mutex_unlock(&user->cons_lock); | 423 | mutex_unlock(&user->cons_lock); |
431 | kleave(" = %ld", PTR_ERR(key)); | 424 | kleave(" = %ld", PTR_ERR(key)); |
432 | return PTR_ERR(key); | 425 | return PTR_ERR(key); |
433 | } | 426 | } |
434 | 427 | ||
/*
 * Commence key construction.
 *
 * Allocate a new, under-construction key, charge it to the calling user's
 * quota and kick off the instantiation upcall.  Returns the new key (possibly
 * still under construction) or an ERR_PTR.
 */
static struct key *construct_key_and_link(struct key_type *type,
					  const char *description,
					  const char *callout_info,
					  size_t callout_len,
					  void *aux,
					  struct key *dest_keyring,
					  unsigned long flags)
{
	struct key_user *user;
	struct key *key;
	int ret;

	kenter("");

	user = key_user_lookup(current_fsuid(), current_user_ns());
	if (!user)
		return ERR_PTR(-ENOMEM);

	/* pin the destination keyring for the duration; released on all
	 * exit paths below with key_put() */
	construct_get_dest_keyring(&dest_keyring);

	ret = construct_alloc_key(type, description, dest_keyring, flags, user,
				  &key);
	key_user_put(user);

	if (ret == 0) {
		/* we allocated the key, so we must start construction */
		ret = construct_key(key, callout_info, callout_len, aux,
				    dest_keyring);
		if (ret < 0) {
			kdebug("cons failed");
			goto construction_failed;
		}
	} else if (ret == -EINPROGRESS) {
		/* someone else is already constructing this key; treat as
		 * success and hand the key back */
		ret = 0;
	} else {
		goto couldnt_alloc_key;
	}

	key_put(dest_keyring);
	kleave(" = key %d", key_serial(key));
	return key;

construction_failed:
	/* mark the key negative so concurrent requesters see the failure
	 * rather than blocking forever, then drop our reference */
	key_negate_and_link(key, key_negative_timeout, NULL, NULL);
	key_put(key);
couldnt_alloc_key:
	key_put(dest_keyring);
	kleave(" = %d", ret);
	return ERR_PTR(ret);
}
487 | 480 | ||
/**
 * request_key_and_link - Request a key and cache it in a keyring.
 * @type: The type of key we want.
 * @description: The searchable description of the key.
 * @callout_info: The data to pass to the instantiation upcall (or NULL).
 * @callout_len: The length of callout_info.
 * @aux: Auxiliary data for the upcall.
 * @dest_keyring: Where to cache the key.
 * @flags: Flags to key_alloc().
 *
 * A key matching the specified criteria is searched for in the process's
 * keyrings and returned with its usage count incremented if found.  Otherwise,
 * if callout_info is not NULL, a key will be allocated and some service
 * (probably in userspace) will be asked to instantiate it.
 *
 * If successfully found or created, the key will be linked to the destination
 * keyring if one is provided.
 *
 * Returns a pointer to the key if successful; -EACCES, -ENOKEY, -EKEYREVOKED
 * or -EKEYEXPIRED if an inaccessible, negative, revoked or expired key was
 * found; -ENOKEY if no key was found and no @callout_info was given; -EDQUOT
 * if insufficient key quota was available to create a new key; or -ENOMEM if
 * insufficient memory was available.
 *
 * If the returned key was created, then it may still be under construction,
 * and wait_for_key_construction() should be used to wait for that to complete.
 */
struct key *request_key_and_link(struct key_type *type,
				 const char *description,
				 const void *callout_info,
				 size_t callout_len,
				 void *aux,
				 struct key *dest_keyring,
				 unsigned long flags)
{
	const struct cred *cred = current_cred();
	struct key *key;
	key_ref_t key_ref;
	int ret;

	kenter("%s,%s,%p,%zu,%p,%p,%lx",
	       type->name, description, callout_info, callout_len, aux,
	       dest_keyring, flags);

	/* search all the process keyrings for a key */
	key_ref = search_process_keyrings(type, description, type->match, cred);

	if (!IS_ERR(key_ref)) {
		/* found an existing key: optionally cache it in the
		 * destination keyring before returning it */
		key = key_ref_to_ptr(key_ref);
		if (dest_keyring) {
			construct_get_dest_keyring(&dest_keyring);
			ret = key_link(dest_keyring, key);
			key_put(dest_keyring);
			if (ret < 0) {
				/* linking failed: drop the found key and
				 * report the link error instead */
				key_put(key);
				key = ERR_PTR(ret);
				goto error;
			}
		}
	} else if (PTR_ERR(key_ref) != -EAGAIN) {
		/* hard search error (e.g. -EACCES): pass it straight back */
		key = ERR_CAST(key_ref);
	} else {
		/* the search failed, but the keyrings were searchable, so we
		 * should consult userspace if we can */
		key = ERR_PTR(-ENOKEY);
		if (!callout_info)
			goto error;

		key = construct_key_and_link(type, description, callout_info,
					     callout_len, aux, dest_keyring,
					     flags);
	}

error:
	kleave(" = %p", key);
	return key;
}
565 | 558 | ||
566 | /** | 559 | /** |
567 | * wait_for_key_construction - Wait for construction of a key to complete | 560 | * wait_for_key_construction - Wait for construction of a key to complete |
568 | * @key: The key being waited for. | 561 | * @key: The key being waited for. |
569 | * @intr: Whether to wait interruptibly. | 562 | * @intr: Whether to wait interruptibly. |
570 | * | 563 | * |
571 | * Wait for a key to finish being constructed. | 564 | * Wait for a key to finish being constructed. |
572 | * | 565 | * |
573 | * Returns 0 if successful; -ERESTARTSYS if the wait was interrupted; -ENOKEY | 566 | * Returns 0 if successful; -ERESTARTSYS if the wait was interrupted; -ENOKEY |
574 | * if the key was negated; or -EKEYREVOKED or -EKEYEXPIRED if the key was | 567 | * if the key was negated; or -EKEYREVOKED or -EKEYEXPIRED if the key was |
575 | * revoked or expired. | 568 | * revoked or expired. |
576 | */ | 569 | */ |
577 | int wait_for_key_construction(struct key *key, bool intr) | 570 | int wait_for_key_construction(struct key *key, bool intr) |
578 | { | 571 | { |
579 | int ret; | 572 | int ret; |
580 | 573 | ||
581 | ret = wait_on_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT, | 574 | ret = wait_on_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT, |
582 | intr ? key_wait_bit_intr : key_wait_bit, | 575 | intr ? key_wait_bit_intr : key_wait_bit, |
583 | intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); | 576 | intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); |
584 | if (ret < 0) | 577 | if (ret < 0) |
585 | return ret; | 578 | return ret; |
586 | if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) | 579 | if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) |
587 | return key->type_data.reject_error; | 580 | return key->type_data.reject_error; |
588 | return key_validate(key); | 581 | return key_validate(key); |
589 | } | 582 | } |
590 | EXPORT_SYMBOL(wait_for_key_construction); | 583 | EXPORT_SYMBOL(wait_for_key_construction); |
591 | 584 | ||
592 | /** | 585 | /** |
593 | * request_key - Request a key and wait for construction | 586 | * request_key - Request a key and wait for construction |
594 | * @type: Type of key. | 587 | * @type: Type of key. |
595 | * @description: The searchable description of the key. | 588 | * @description: The searchable description of the key. |
596 | * @callout_info: The data to pass to the instantiation upcall (or NULL). | 589 | * @callout_info: The data to pass to the instantiation upcall (or NULL). |
597 | * | 590 | * |
598 | * As for request_key_and_link() except that it does not add the returned key | 591 | * As for request_key_and_link() except that it does not add the returned key |
599 | * to a keyring if found, new keys are always allocated in the user's quota, | 592 | * to a keyring if found, new keys are always allocated in the user's quota, |
600 | * the callout_info must be a NUL-terminated string and no auxiliary data can | 593 | * the callout_info must be a NUL-terminated string and no auxiliary data can |
601 | * be passed. | 594 | * be passed. |
602 | * | 595 | * |
603 | * Furthermore, it then works as wait_for_key_construction() to wait for the | 596 | * Furthermore, it then works as wait_for_key_construction() to wait for the |
604 | * completion of keys undergoing construction with a non-interruptible wait. | 597 | * completion of keys undergoing construction with a non-interruptible wait. |
605 | */ | 598 | */ |
606 | struct key *request_key(struct key_type *type, | 599 | struct key *request_key(struct key_type *type, |
607 | const char *description, | 600 | const char *description, |
608 | const char *callout_info) | 601 | const char *callout_info) |
609 | { | 602 | { |
610 | struct key *key; | 603 | struct key *key; |
611 | size_t callout_len = 0; | 604 | size_t callout_len = 0; |
612 | int ret; | 605 | int ret; |
613 | 606 | ||
614 | if (callout_info) | 607 | if (callout_info) |
615 | callout_len = strlen(callout_info); | 608 | callout_len = strlen(callout_info); |
616 | key = request_key_and_link(type, description, callout_info, callout_len, | 609 | key = request_key_and_link(type, description, callout_info, callout_len, |
617 | NULL, NULL, KEY_ALLOC_IN_QUOTA); | 610 | NULL, NULL, KEY_ALLOC_IN_QUOTA); |
618 | if (!IS_ERR(key)) { | 611 | if (!IS_ERR(key)) { |
619 | ret = wait_for_key_construction(key, false); | 612 | ret = wait_for_key_construction(key, false); |
620 | if (ret < 0) { | 613 | if (ret < 0) { |
621 | key_put(key); | 614 | key_put(key); |
622 | return ERR_PTR(ret); | 615 | return ERR_PTR(ret); |
623 | } | 616 | } |
624 | } | 617 | } |
625 | return key; | 618 | return key; |
626 | } | 619 | } |
627 | EXPORT_SYMBOL(request_key); | 620 | EXPORT_SYMBOL(request_key); |
628 | 621 | ||
629 | /** | 622 | /** |
630 | * request_key_with_auxdata - Request a key with auxiliary data for the upcaller | 623 | * request_key_with_auxdata - Request a key with auxiliary data for the upcaller |
631 | * @type: The type of key we want. | 624 | * @type: The type of key we want. |
632 | * @description: The searchable description of the key. | 625 | * @description: The searchable description of the key. |
633 | * @callout_info: The data to pass to the instantiation upcall (or NULL). | 626 | * @callout_info: The data to pass to the instantiation upcall (or NULL). |
634 | * @callout_len: The length of callout_info. | 627 | * @callout_len: The length of callout_info. |
635 | * @aux: Auxiliary data for the upcall. | 628 | * @aux: Auxiliary data for the upcall. |
636 | * | 629 | * |
637 | * As for request_key_and_link() except that it does not add the returned key | 630 | * As for request_key_and_link() except that it does not add the returned key |
638 | * to a keyring if found and new keys are always allocated in the user's quota. | 631 | * to a keyring if found and new keys are always allocated in the user's quota. |
639 | * | 632 | * |
640 | * Furthermore, it then works as wait_for_key_construction() to wait for the | 633 | * Furthermore, it then works as wait_for_key_construction() to wait for the |
641 | * completion of keys undergoing construction with a non-interruptible wait. | 634 | * completion of keys undergoing construction with a non-interruptible wait. |
642 | */ | 635 | */ |
643 | struct key *request_key_with_auxdata(struct key_type *type, | 636 | struct key *request_key_with_auxdata(struct key_type *type, |
644 | const char *description, | 637 | const char *description, |
645 | const void *callout_info, | 638 | const void *callout_info, |
646 | size_t callout_len, | 639 | size_t callout_len, |
647 | void *aux) | 640 | void *aux) |
648 | { | 641 | { |
649 | struct key *key; | 642 | struct key *key; |
650 | int ret; | 643 | int ret; |
651 | 644 | ||
652 | key = request_key_and_link(type, description, callout_info, callout_len, | 645 | key = request_key_and_link(type, description, callout_info, callout_len, |
653 | aux, NULL, KEY_ALLOC_IN_QUOTA); | 646 | aux, NULL, KEY_ALLOC_IN_QUOTA); |
654 | if (!IS_ERR(key)) { | 647 | if (!IS_ERR(key)) { |
655 | ret = wait_for_key_construction(key, false); | 648 | ret = wait_for_key_construction(key, false); |
656 | if (ret < 0) { | 649 | if (ret < 0) { |
657 | key_put(key); | 650 | key_put(key); |
658 | return ERR_PTR(ret); | 651 | return ERR_PTR(ret); |
659 | } | 652 | } |
660 | } | 653 | } |
661 | return key; | 654 | return key; |
662 | } | 655 | } |
663 | EXPORT_SYMBOL(request_key_with_auxdata); | 656 | EXPORT_SYMBOL(request_key_with_auxdata); |
664 | 657 | ||
/**
 * request_key_async - Request a key (allow async construction)
 * @type: Type of key.
 * @description: The searchable description of the key.
 * @callout_info: The data to pass to the instantiation upcall (or NULL).
 * @callout_len: The length of callout_info.
 *
 * As for request_key_and_link() except that it does not add the returned key
 * to a keyring if found, new keys are always allocated in the user's quota and
 * no auxiliary data can be passed.
 *
 * The caller should call wait_for_key_construction() to wait for the
 * completion of the returned key if it is still undergoing construction.
 */
struct key *request_key_async(struct key_type *type,
			      const char *description,
			      const void *callout_info,
			      size_t callout_len)
{
	return request_key_and_link(type, description, callout_info,
				    callout_len, NULL, NULL,
				    KEY_ALLOC_IN_QUOTA);
}
EXPORT_SYMBOL(request_key_async);
689 | 682 | ||
/**
 * request_key_async_with_auxdata - Request a key with auxiliary data for the
 *				    upcaller (allow async construction)
 * @type: Type of key.
 * @description: The searchable description of the key.
 * @callout_info: The data to pass to the instantiation upcall (or NULL).
 * @callout_len: The length of callout_info.
 * @aux: Auxiliary data for the upcall.
 *
 * As for request_key_and_link() except that it does not add the returned key
 * to a keyring if found and new keys are always allocated in the user's quota.
 *
 * The caller should call wait_for_key_construction() to wait for the
 * completion of the returned key if it is still undergoing construction.
 */
struct key *request_key_async_with_auxdata(struct key_type *type,
					   const char *description,
					   const void *callout_info,
					   size_t callout_len,
					   void *aux)
{
	return request_key_and_link(type, description, callout_info,
				    callout_len, aux, NULL, KEY_ALLOC_IN_QUOTA);
}
EXPORT_SYMBOL(request_key_async_with_auxdata);
714 | 707 |