Commit 37252db6aa576c34fd794a5a54fb32d7a8b3a07a

Authored by Jiri Kosina
Committed by Rusty Russell
1 parent c3b92c8787

kmod: prevent kmod_loop_msg overflow in __request_module()

Due to the post-increment in the condition on kmod_loop_msg in
__request_module(), the system log can be spammed by far more than 5
instances of the 'runaway loop' message if the number of events
triggering it makes kmod_loop_msg overflow.

Fix that by making sure we never increment it past the threshold.

Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
CC: stable@kernel.org
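
For illustration only (this sketch is not part of the commit), a standalone
user-space program showing why the post-incrementing check starts firing
again once the counter wraps, and how the fixed pattern saturates at the
threshold. The name kmod_loop_msg is borrowed from the kernel code; this is
ordinary user-space C:

        /* overflow-demo.c: user-space sketch of the kmod_loop_msg bug.
         * Not kernel code; names are borrowed for clarity.  Signed overflow
         * is formally undefined in standard C; the kernel is built with
         * -fno-strict-overflow, so in practice the counter wraps to INT_MIN. */
        #include <limits.h>
        #include <stdio.h>

        static int kmod_loop_msg;       /* the shared message counter */

        /* Old check: increments on *every* event, even past the limit. */
        static int old_check(void)
        {
                return kmod_loop_msg++ < 5;
        }

        /* New check: never increments past the threshold, so no overflow. */
        static int new_check(void)
        {
                if (kmod_loop_msg < 5) {
                        kmod_loop_msg++;
                        return 1;
                }
                return 0;
        }

        int main(void)
        {
                /* After ~2^31 triggering events the old counter reaches
                 * INT_MAX, wraps negative on the next event, and the message
                 * floodgates reopen for another ~2^31 events. */
                kmod_loop_msg = INT_MAX;
                (void)old_check();      /* wraps the counter to INT_MIN */
                printf("after wrap, old check fires: %d\n", old_check()); /* 1 */

                /* The fixed counter simply stops at the threshold. */
                kmod_loop_msg = 5;
                printf("at threshold, new check fires: %d\n", new_check()); /* 0 */
                return 0;
        }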

Showing 1 changed file with 3 additions and 1 deletion (inline diff)

/*
        kmod, the new module loader (replaces kerneld)
        Kirk Petersen

        Reorganized not to be a daemon by Adam Richter, with guidance
        from Greg Zornetzer.

        Modified to avoid chroot and file sharing problems.
        Mikael Pettersson

        Limit the concurrent number of kmod modprobes to catch loops from
        "modprobe needs a service that is in a module".
        Keith Owens <kaos@ocs.com.au> December 1999

        Unblock all signals when we exec a usermode process.
        Shuu Yamaguchi <shuu@wondernetworkresources.com> December 2000

        call_usermodehelper wait flag, and remove exec_usermodehelper.
        Rusty Russell <rusty@rustcorp.com.au> Jan 2003
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <asm/uaccess.h>

#include <trace/events/module.h>

extern int max_threads;

static struct workqueue_struct *khelper_wq;

#define CAP_BSET        (void *)1
#define CAP_PI          (void *)2

static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);

#ifdef CONFIG_MODULES

/*
        modprobe_path is set via /proc/sys.
*/
char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";

/**
 * __request_module - try to load a kernel module
 * @wait: wait (or not) for the operation to complete
 * @fmt: printf style format string for the name of the module
 * @...: arguments as specified in the format string
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code on failure. Note that a
 * successful module load does not mean the module did not then unload
 * and exit on an error of its own. Callers must check that the service
 * they requested is now available, not blindly invoke it.
 *
 * If module auto-loading support is disabled then this function
 * becomes a no-operation.
 */
int __request_module(bool wait, const char *fmt, ...)
{
        va_list args;
        char module_name[MODULE_NAME_LEN];
        unsigned int max_modprobes;
        int ret;
        char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
        static char *envp[] = { "HOME=/",
                                "TERM=linux",
                                "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
                                NULL };
        static atomic_t kmod_concurrent = ATOMIC_INIT(0);
#define MAX_KMOD_CONCURRENT 50  /* Completely arbitrary value - KAO */
        static int kmod_loop_msg;

        va_start(args, fmt);
        ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
        va_end(args);
        if (ret >= MODULE_NAME_LEN)
                return -ENAMETOOLONG;

        ret = security_kernel_module_request(module_name);
        if (ret)
                return ret;

        /* If modprobe needs a service that is in a module, we get a recursive
         * loop.  Limit the number of running kmod threads to max_threads/2 or
         * MAX_KMOD_CONCURRENT, whichever is the smaller.  A cleaner method
         * would be to run the parents of this process, counting how many times
         * kmod was invoked.  That would mean accessing the internals of the
         * process tables to get the command line, proc_pid_cmdline is static
         * and it is not worth changing the proc code just to handle this case.
         * KAO.
         *
         * "trace the ppid" is simple, but will fail if someone's
         * parent exits.  I think this is as good as it gets. --RR
         */
        max_modprobes = min(max_threads/2, MAX_KMOD_CONCURRENT);
        atomic_inc(&kmod_concurrent);
        if (atomic_read(&kmod_concurrent) > max_modprobes) {
                /* We may be blaming an innocent here, but unlikely */
-               if (kmod_loop_msg++ < 5)
+               if (kmod_loop_msg < 5) {
                        printk(KERN_ERR
                               "request_module: runaway loop modprobe %s\n",
                               module_name);
+                       kmod_loop_msg++;
+               }
                atomic_dec(&kmod_concurrent);
                return -ENOMEM;
        }

        trace_module_request(module_name, wait, _RET_IP_);

        ret = call_usermodehelper_fns(modprobe_path, argv, envp,
                        wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC,
                        NULL, NULL, NULL);

        atomic_dec(&kmod_concurrent);
        return ret;
}
EXPORT_SYMBOL(__request_module);
#endif /* CONFIG_MODULES */

/*
 * This is the task which runs the usermode application
 */
static int ____call_usermodehelper(void *data)
{
        struct subprocess_info *sub_info = data;
        struct cred *new;
        int retval;

        spin_lock_irq(&current->sighand->siglock);
        flush_signal_handlers(current, 1);
        spin_unlock_irq(&current->sighand->siglock);

        /* We can run anywhere, unlike our parent keventd(). */
        set_cpus_allowed_ptr(current, cpu_all_mask);

        /*
         * Our parent is keventd, which runs with elevated scheduling priority.
         * Avoid propagating that into the userspace child.
         */
        set_user_nice(current, 0);

        retval = -ENOMEM;
        new = prepare_kernel_cred(current);
        if (!new)
                goto fail;

        spin_lock(&umh_sysctl_lock);
        new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
        new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
                                             new->cap_inheritable);
        spin_unlock(&umh_sysctl_lock);

        if (sub_info->init) {
                retval = sub_info->init(sub_info, new);
                if (retval) {
                        abort_creds(new);
                        goto fail;
                }
        }

        commit_creds(new);

        retval = kernel_execve(sub_info->path,
                               (const char *const *)sub_info->argv,
                               (const char *const *)sub_info->envp);

        /* Exec failed? */
fail:
        sub_info->retval = retval;
        do_exit(0);
}

void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
        if (info->cleanup)
                (*info->cleanup)(info);
        kfree(info);
}
EXPORT_SYMBOL(call_usermodehelper_freeinfo);

/* Keventd can't block, but this (a child) can. */
static int wait_for_helper(void *data)
{
        struct subprocess_info *sub_info = data;
        pid_t pid;

        /* If SIGCLD is ignored sys_wait4 won't populate the status. */
        spin_lock_irq(&current->sighand->siglock);
        current->sighand->action[SIGCHLD-1].sa.sa_handler = SIG_DFL;
        spin_unlock_irq(&current->sighand->siglock);

        pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
        if (pid < 0) {
                sub_info->retval = pid;
        } else {
                int ret = -ECHILD;
                /*
                 * Normally it is bogus to call wait4() from in-kernel because
                 * wait4() wants to write the exit code to a userspace address.
                 * But wait_for_helper() always runs as keventd, and put_user()
                 * to a kernel address works OK for kernel threads, due to their
                 * having an mm_segment_t which spans the entire address space.
                 *
                 * Thus the __user pointer cast is valid here.
                 */
                sys_wait4(pid, (int __user *)&ret, 0, NULL);

                /*
                 * If ret is 0, either ____call_usermodehelper failed and the
                 * real error code is already in sub_info->retval or
                 * sub_info->retval is 0 anyway, so don't mess with it then.
                 */
                if (ret)
                        sub_info->retval = ret;
        }

        complete(sub_info->complete);
        return 0;
}

/* This is run by khelper thread  */
static void __call_usermodehelper(struct work_struct *work)
{
        struct subprocess_info *sub_info =
                container_of(work, struct subprocess_info, work);
        enum umh_wait wait = sub_info->wait;
        pid_t pid;

        /* CLONE_VFORK: wait until the usermode helper has execve'd
         * successfully.  We need the data structures to stay around
         * until that is done. */
        if (wait == UMH_WAIT_PROC)
                pid = kernel_thread(wait_for_helper, sub_info,
                                    CLONE_FS | CLONE_FILES | SIGCHLD);
        else
                pid = kernel_thread(____call_usermodehelper, sub_info,
                                    CLONE_VFORK | SIGCHLD);

        switch (wait) {
        case UMH_NO_WAIT:
                call_usermodehelper_freeinfo(sub_info);
                break;

        case UMH_WAIT_PROC:
                if (pid > 0)
                        break;
                /* FALLTHROUGH */
        case UMH_WAIT_EXEC:
                if (pid < 0)
                        sub_info->retval = pid;
                complete(sub_info->complete);
        }
}

/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 */
static int usermodehelper_disabled = 1;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_pm_callback() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_pm_callback() fails
 */
#define RUNNING_HELPERS_TIMEOUT (5 * HZ)

/**
 * usermodehelper_disable - prevent new helpers from being started
 */
int usermodehelper_disable(void)
{
        long retval;

        usermodehelper_disabled = 1;
        smp_mb();
        /*
         * From now on call_usermodehelper_exec() won't start any new
         * helpers, so it is sufficient if running_helpers turns out to
         * be zero at one point (it may be increased later, but that
         * doesn't matter).
         */
        retval = wait_event_timeout(running_helpers_waitq,
                                    atomic_read(&running_helpers) == 0,
                                    RUNNING_HELPERS_TIMEOUT);
        if (retval)
                return 0;

        usermodehelper_disabled = 0;
        return -EAGAIN;
}

/**
 * usermodehelper_enable - allow new helpers to be started again
 */
void usermodehelper_enable(void)
{
        usermodehelper_disabled = 0;
}

/**
 * usermodehelper_is_disabled - check if new helpers are allowed to be started
 */
bool usermodehelper_is_disabled(void)
{
        return usermodehelper_disabled;
}
EXPORT_SYMBOL_GPL(usermodehelper_is_disabled);

static void helper_lock(void)
{
        atomic_inc(&running_helpers);
        smp_mb__after_atomic_inc();
}

static void helper_unlock(void)
{
        if (atomic_dec_and_test(&running_helpers))
                wake_up(&running_helpers_waitq);
}

/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure.  This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 */
struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
                                                  char **envp, gfp_t gfp_mask)
{
        struct subprocess_info *sub_info;
        sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
        if (!sub_info)
                goto out;

        INIT_WORK(&sub_info->work, __call_usermodehelper);
        sub_info->path = path;
        sub_info->argv = argv;
        sub_info->envp = envp;
out:
        return sub_info;
}
EXPORT_SYMBOL(call_usermodehelper_setup);

/**
 * call_usermodehelper_setfns - set a cleanup/init function
 * @info: a subprocess_info returned by call_usermodehelper_setup
 * @cleanup: a cleanup function
 * @init: an init function
 * @data: arbitrary context sensitive data
 *
 * The init function is used to customize the helper process prior to
 * exec.  A non-zero return code causes the process to error out, exit,
 * and return the failure to the calling process.
 *
 * The cleanup function is called just before the subprocess_info is
 * about to be freed.  This can be used for freeing the argv and envp.
 * The function must be runnable in either a process context or the
 * context in which call_usermodehelper_exec is called.
 */
void call_usermodehelper_setfns(struct subprocess_info *info,
                    int (*init)(struct subprocess_info *info, struct cred *new),
                    void (*cleanup)(struct subprocess_info *info),
                    void *data)
{
        info->cleanup = cleanup;
        info->init = init;
        info->data = data;
}
EXPORT_SYMBOL(call_usermodehelper_setfns);

/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        when -1 don't wait at all, but you get no useful error back when
 *        the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application.  The application is started
 * asynchronously if wait is not set, and runs as a child of keventd.
 * (ie. it runs with full root capabilities).
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info,
                             enum umh_wait wait)
{
        DECLARE_COMPLETION_ONSTACK(done);
        int retval = 0;

        helper_lock();
        if (sub_info->path[0] == '\0')
                goto out;

        if (!khelper_wq || usermodehelper_disabled) {
                retval = -EBUSY;
                goto out;
        }

        sub_info->complete = &done;
        sub_info->wait = wait;

        queue_work(khelper_wq, &sub_info->work);
        if (wait == UMH_NO_WAIT)        /* task has freed sub_info */
                goto unlock;
        wait_for_completion(&done);
        retval = sub_info->retval;

out:
        call_usermodehelper_freeinfo(sub_info);
unlock:
        helper_unlock();
        return retval;
}
EXPORT_SYMBOL(call_usermodehelper_exec);

static int proc_cap_handler(struct ctl_table *table, int write,
                            void __user *buffer, size_t *lenp, loff_t *ppos)
{
        struct ctl_table t;
        unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
        kernel_cap_t new_cap;
        int err, i;

        if (write && (!capable(CAP_SETPCAP) ||
                      !capable(CAP_SYS_MODULE)))
                return -EPERM;

        /*
         * convert from the global kernel_cap_t to the ulong array to print to
         * userspace if this is a read.
         */
        spin_lock(&umh_sysctl_lock);
        for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++) {
                if (table->data == CAP_BSET)
                        cap_array[i] = usermodehelper_bset.cap[i];
                else if (table->data == CAP_PI)
                        cap_array[i] = usermodehelper_inheritable.cap[i];
                else
                        BUG();
        }
        spin_unlock(&umh_sysctl_lock);

        t = *table;
        t.data = &cap_array;

        /*
         * actually read or write an array of ulongs from userspace.  Remember
         * these are least significant 32 bits first
         */
        err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
        if (err < 0)
                return err;

        /*
         * convert from the sysctl array of ulongs to the kernel_cap_t
         * internal representation
         */
        for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
                new_cap.cap[i] = cap_array[i];

        /*
         * Drop everything not in the new_cap (but don't add things)
         */
        spin_lock(&umh_sysctl_lock);
        if (write) {
                if (table->data == CAP_BSET)
                        usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
                if (table->data == CAP_PI)
                        usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
        }
        spin_unlock(&umh_sysctl_lock);

        return 0;
}

struct ctl_table usermodehelper_table[] = {
        {
                .procname       = "bset",
                .data           = CAP_BSET,
                .maxlen         = _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
                .mode           = 0600,
                .proc_handler   = proc_cap_handler,
        },
        {
                .procname       = "inheritable",
                .data           = CAP_PI,
                .maxlen         = _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
                .mode           = 0600,
                .proc_handler   = proc_cap_handler,
        },
        { }
};

void __init usermodehelper_init(void)
{
        khelper_wq = create_singlethread_workqueue("khelper");
        BUG_ON(!khelper_wq);
}
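
As a usage note (not part of this commit), here is a sketch of how a
kernel-side caller might chain the setup/setfns/exec API documented above.
The wrapper function, the helper binary path, and its arguments are all
hypothetical; only the three call_usermodehelper_* functions and the
UMH_WAIT_PROC flag come from the file itself:

        /* Hypothetical caller of the usermode-helper API shown above.
         * The helper binary and its arguments are made-up examples. */
        #include <linux/kmod.h>

        static int example_run_helper(void)
        {
                struct subprocess_info *info;
                static char *argv[] = { "/sbin/example-helper", "--oneshot", NULL };
                static char *envp[] = { "HOME=/",
                                        "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
                                        NULL };

                info = call_usermodehelper_setup(argv[0], argv, envp, GFP_KERNEL);
                if (!info)
                        return -ENOMEM;

                /* Optional: attach init/cleanup callbacks and private data
                 * here with call_usermodehelper_setfns(info, init, cleanup,
                 * data) before queueing the work. */

                /* UMH_WAIT_PROC waits for the helper to exit and returns its
                 * status; on every path except UMH_NO_WAIT, _exec() also
                 * frees the subprocess_info for us. */
                return call_usermodehelper_exec(info, UMH_WAIT_PROC);
        }

In this era of the tree, the call_usermodehelper() convenience wrapper
performs essentially this sequence via call_usermodehelper_fns(), which is
what __request_module() above uses to invoke modprobe.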