/*
	kmod, the new module loader (replaces kerneld)

	Reorganized not to be a daemon by Adam Richter, with guidance

	Modified to avoid chroot and file sharing problems.

	Limit the concurrent number of kmod modprobes to catch loops from
	"modprobe needs a service that is in a module".
	Keith Owens <kaos@ocs.com.au> December 1999

	Unblock all signals when we exec a usermode process.
	Shuu Yamaguchi <shuu@wondernetworkresources.com> December 2000

	call_usermodehelper wait flag, and remove exec_usermodehelper.
	Rusty Russell <rusty@rustcorp.com.au> Jan 2003
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>

#include <trace/events/module.h>

extern int max_threads;

static struct workqueue_struct *khelper_wq;

/*
 * kmod_thread_locker is used for deadlock avoidance. There is no explicit
 * locking to protect this global - it is private to the singleton khelper
 * thread and should only ever be modified by that thread.
 */
static const struct task_struct *kmod_thread_locker;

#define CAP_BSET	(void *)1
#define CAP_PI		(void *)2

static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);

/*
	modprobe_path is set via /proc/sys.
*/
char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";

static void free_modprobe_argv(struct subprocess_info *info)
{
	kfree(info->argv[3]);	/* check call_modprobe() */

static int call_modprobe(char *module_name, int wait)
{
	static char *envp[] = {
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",

	char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);

	module_name = kstrdup(module_name, GFP_KERNEL);

	argv[0] = modprobe_path;
	argv[3] = module_name;	/* check free_modprobe_argv() */

	return call_usermodehelper_fns(modprobe_path, argv, envp,
		wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
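
	/*
	 * Ownership note: by passing free_modprobe_argv() as the cleanup
	 * callback above, argv and the kstrdup'ed name in argv[3] are freed
	 * by the helper machinery itself; per the comment above
	 * call_usermodehelper_fns() further down, the one case the caller
	 * still has to clean up after is an -ENOMEM return.
	 */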

/**
 * __request_module - try to load a kernel module
 * @wait: wait (or not) for the operation to complete
 * @fmt: printf style format string for the name of the module
 * @...: arguments as specified in the format string
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code on failure. Note that a
 * successful module load does not mean the module did not then unload
 * and exit on an error of its own. Callers must check that the service
 * they requested is now available and not blindly invoke it.
 *
 * If module auto-loading support is disabled then this function
 * becomes a no-operation.
 */
int __request_module(bool wait, const char *fmt, ...)
{
	char module_name[MODULE_NAME_LEN];
	unsigned int max_modprobes;
	static atomic_t kmod_concurrent = ATOMIC_INIT(0);
#define MAX_KMOD_CONCURRENT 50	/* Completely arbitrary value - KAO */
	static int kmod_loop_msg;

	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
	if (ret >= MODULE_NAME_LEN)
		return -ENAMETOOLONG;

	ret = security_kernel_module_request(module_name);

	/* If modprobe needs a service that is in a module, we get a recursive
	 * loop. Limit the number of running kmod threads to max_threads/2 or
	 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
	 * would be to run the parents of this process, counting how many times
	 * kmod was invoked. That would mean accessing the internals of the
	 * process tables to get the command line, proc_pid_cmdline is static
	 * and it is not worth changing the proc code just to handle this case.
	 *
	 * "trace the ppid" is simple, but will fail if someone's
	 * parent exits. I think this is as good as it gets. --RR
	 */
	max_modprobes = min(max_threads/2, MAX_KMOD_CONCURRENT);
	atomic_inc(&kmod_concurrent);
	if (atomic_read(&kmod_concurrent) > max_modprobes) {
		/* We may be blaming an innocent here, but unlikely */
		if (kmod_loop_msg < 5) {
			printk(KERN_ERR
			       "request_module: runaway loop modprobe %s\n",
			       module_name);
		atomic_dec(&kmod_concurrent);

	trace_module_request(module_name, wait, _RET_IP_);

	ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);

	atomic_dec(&kmod_concurrent);

EXPORT_SYMBOL(__request_module);
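
/*
 * Illustrative only (the wrappers live in <linux/kmod.h>, not here): callers
 * normally use request_module() or request_module_nowait(), which expand to
 * __request_module(true, ...) and __request_module(false, ...). A typical
 * call looks like
 *
 *	if (request_module("fs-%s", name) != 0)
 *		return -ENODEV;
 *
 * after which the caller still has to look the requested service up again,
 * as the kernel-doc above demands.
 */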
#endif /* CONFIG_MODULES */

/*
 * This is the task which runs the usermode application
 */
static int ____call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;

	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	spin_unlock_irq(&current->sighand->siglock);

	/* We can run anywhere, unlike our parent keventd(). */
	set_cpus_allowed_ptr(current, cpu_all_mask);

	/*
	 * Our parent is keventd, which runs with elevated scheduling priority.
	 * Avoid propagating that into the userspace child.
	 */
	set_user_nice(current, 0);

	new = prepare_kernel_cred(current);

	spin_lock(&umh_sysctl_lock);
	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
	new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
					     new->cap_inheritable);
	spin_unlock(&umh_sysctl_lock);

	if (sub_info->init) {
		retval = sub_info->init(sub_info, new);

	retval = kernel_execve(sub_info->path,
			       (const char *const *)sub_info->argv,
			       (const char *const *)sub_info->envp);

	sub_info->retval = retval;

static int call_helper(void *data)
{
	/* Worker thread started blocking khelper thread. */
	kmod_thread_locker = current;
	return ____call_usermodehelper(data);
}

static void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
		(*info->cleanup)(info);

static void umh_complete(struct subprocess_info *sub_info)
{
	struct completion *comp = xchg(&sub_info->complete, NULL);
	/*
	 * See call_usermodehelper_exec(). If xchg() returns NULL
	 * we own sub_info, the UMH_KILLABLE caller has gone away.
	 */
		call_usermodehelper_freeinfo(sub_info);

/* Keventd can't block, but this (a child) can. */
static int wait_for_helper(void *data)
{
	struct subprocess_info *sub_info = data;

	/* If SIGCLD is ignored sys_wait4 won't populate the status. */
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[SIGCHLD-1].sa.sa_handler = SIG_DFL;
	spin_unlock_irq(&current->sighand->siglock);

	pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
		sub_info->retval = pid;

		/*
		 * Normally it is bogus to call wait4() from in-kernel because
		 * wait4() wants to write the exit code to a userspace address.
		 * But wait_for_helper() always runs as keventd, and put_user()
		 * to a kernel address works OK for kernel threads, due to their
		 * having an mm_segment_t which spans the entire address space.
		 *
		 * Thus the __user pointer cast is valid here.
		 */
		sys_wait4(pid, (int __user *)&ret, 0, NULL);

		/*
		 * If ret is 0, either ____call_usermodehelper failed and the
		 * real error code is already in sub_info->retval or
		 * sub_info->retval is 0 anyway, so don't mess with it then.
		 */
			sub_info->retval = ret;

	umh_complete(sub_info);

/* This is run by khelper thread */
static void __call_usermodehelper(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);
	int wait = sub_info->wait & ~UMH_KILLABLE;

	/* CLONE_VFORK: wait until the usermode helper has execve'd
	 * successfully. We need the data structures to stay around
	 * until that is done. */
	if (wait == UMH_WAIT_PROC)
		pid = kernel_thread(wait_for_helper, sub_info,
				    CLONE_FS | CLONE_FILES | SIGCHLD);
		pid = kernel_thread(call_helper, sub_info,
				    CLONE_VFORK | SIGCHLD);
		/* Worker thread stopped blocking khelper thread. */
		kmod_thread_locker = NULL;

		call_usermodehelper_freeinfo(sub_info);

			sub_info->retval = pid;
		umh_complete(sub_info);

/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;
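
/*
 * For orientation: enum umh_disable_depth (defined in <linux/kmod.h>) has
 * three values - UMH_ENABLED (0), UMH_FREEZING and UMH_DISABLED - so the
 * "!usermodehelper_disabled" checks below mean "fully enabled", while
 * UMH_FREEZING makes waiters block until helpers are re-enabled instead of
 * failing with -EAGAIN as they do for UMH_DISABLED.
 */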

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_disable() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
 * to become 'false'.
 */
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)

int usermodehelper_read_trylock(void)
{
	down_read(&umhelper_sem);
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_INTERRUPTIBLE);
		if (!usermodehelper_disabled)

		if (usermodehelper_disabled == UMH_DISABLED)

		up_read(&umhelper_sem);

		down_read(&umhelper_sem);
	finish_wait(&usermodehelper_disabled_waitq, &wait);
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);

long usermodehelper_read_lock_wait(long timeout)
{
	down_read(&umhelper_sem);
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!usermodehelper_disabled)

		up_read(&umhelper_sem);

		timeout = schedule_timeout(timeout);

		down_read(&umhelper_sem);
	finish_wait(&usermodehelper_disabled_waitq, &wait);
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);

void usermodehelper_read_unlock(void)
{
	up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);

/**
 * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Change the value of usermodehelper_disabled (under umhelper_sem locked for
 * writing) and wakeup tasks waiting for it to change.
 */
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	wake_up(&usermodehelper_disabled_waitq);
	up_write(&umhelper_sem);
}
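
/*
 * The usual entry points for __usermodehelper_set_disable_depth() and
 * __usermodehelper_disable() are thin wrappers in <linux/kmod.h>:
 * usermodehelper_enable() calls __usermodehelper_set_disable_depth(UMH_ENABLED),
 * and usermodehelper_disable() calls __usermodehelper_disable(UMH_DISABLED).
 * The suspend/hibernation code uses them (together with UMH_FREEZING) to
 * fence off helper spawning around a system sleep transition.
 */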

/**
 * __usermodehelper_disable - Prevent new helpers from being started.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
 */
int __usermodehelper_disable(enum umh_disable_depth depth)
{
	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	up_write(&umhelper_sem);

	/*
	 * From now on call_usermodehelper_exec() won't start any new
	 * helpers, so it is sufficient if running_helpers turns out to
	 * be zero at one point (it may be increased later, but that
	 * doesn't matter).
	 */
	retval = wait_event_timeout(running_helpers_waitq,
				    atomic_read(&running_helpers) == 0,
				    RUNNING_HELPERS_TIMEOUT);

	__usermodehelper_set_disable_depth(UMH_ENABLED);

static void helper_lock(void)
{
	atomic_inc(&running_helpers);
	smp_mb__after_atomic_inc();
}

static void helper_unlock(void)
{
	if (atomic_dec_and_test(&running_helpers))
		wake_up(&running_helpers_waitq);
}

/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure. This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 */
struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
						  char **envp, gfp_t gfp_mask)
{
	struct subprocess_info *sub_info;
	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);

	INIT_WORK(&sub_info->work, __call_usermodehelper);
	sub_info->path = path;
	sub_info->argv = argv;
	sub_info->envp = envp;

/**
 * call_usermodehelper_setfns - set a cleanup/init function
 * @info: a subprocess_info returned by call_usermodehelper_setup
 * @cleanup: a cleanup function
 * @init: an init function
 * @data: arbitrary context sensitive data
 *
 * The init function is used to customize the helper process prior to
 * exec. A non-zero return code causes the process to error out, exit,
 * and return the failure to the calling process.
 *
 * The cleanup function is called just before the subprocess_info is about to
 * be freed. This can be used for freeing the argv and envp. The
 * function must be runnable in either a process context or the
 * context in which call_usermodehelper_exec is called.
 */
void call_usermodehelper_setfns(struct subprocess_info *info,
		    int (*init)(struct subprocess_info *info, struct cred *new),
		    void (*cleanup)(struct subprocess_info *info),
		    void *data)
{
	info->cleanup = cleanup;
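
	/*
	 * For a concrete @cleanup example see free_modprobe_argv() near the
	 * top of this file: call_modprobe() hands its kmalloc'ed argv (and the
	 * kstrdup'ed module name in argv[3]) to the helper machinery and lets
	 * that callback free them once the helper has run.
	 */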

/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        when -1 don't wait at all, but you get no useful error back when
 *        the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application. The application is started
 * asynchronously if wait is not set, and runs as a child of keventd.
 * (ie. it runs with full root capabilities).
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
	DECLARE_COMPLETION_ONSTACK(done);

	if (sub_info->path[0] == '\0')

	if (!khelper_wq || usermodehelper_disabled) {

	/*
	 * Worker thread must not wait for khelper thread at below
	 * wait_for_completion() if the thread was created with CLONE_VFORK
	 * flag, for khelper thread is already waiting for the thread at
	 * wait_for_completion() in do_fork().
	 */
	if (wait != UMH_NO_WAIT && current == kmod_thread_locker) {

	sub_info->complete = &done;
	sub_info->wait = wait;

	queue_work(khelper_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */

	if (wait & UMH_KILLABLE) {
		retval = wait_for_completion_killable(&done);

		/* umh_complete() will see NULL and free sub_info */
		if (xchg(&sub_info->complete, NULL))

		/* fallthrough, umh_complete() was already called */

	wait_for_completion(&done);
	retval = sub_info->retval;
	call_usermodehelper_freeinfo(sub_info);

/*
 * call_usermodehelper_fns() will not run the caller-provided cleanup function
 * if a memory allocation failure is experienced. So the caller might need to
 * check the call_usermodehelper_fns() return value: if it is -ENOMEM, perform
 * the necessary cleanup within the caller.
 */
int call_usermodehelper_fns(
	char *path, char **argv, char **envp, int wait,
	int (*init)(struct subprocess_info *info, struct cred *new),
	void (*cleanup)(struct subprocess_info *), void *data)
{
	struct subprocess_info *info;
	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;

	info = call_usermodehelper_setup(path, argv, envp, gfp_mask);

	call_usermodehelper_setfns(info, init, cleanup, data);

	return call_usermodehelper_exec(info, wait);

EXPORT_SYMBOL(call_usermodehelper_fns);
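
/*
 * Sketch of typical use (not taken from this file; call_usermodehelper() is
 * the convenience wrapper in <linux/kmod.h> that passes NULL init/cleanup/data
 * to call_usermodehelper_fns()):
 *
 *	static char *argv[] = { "/sbin/poweroff", NULL };
 *	static char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };
 *
 *	ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
 *
 * UMH_NO_WAIT queues the helper and returns immediately, UMH_WAIT_EXEC waits
 * for the exec to happen, and UMH_WAIT_PROC waits for the helper to exit;
 * UMH_KILLABLE can be ORed in so the waiting caller may be killed.
 */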

static int proc_cap_handler(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
	kernel_cap_t new_cap;

	if (write && (!capable(CAP_SETPCAP) ||
		      !capable(CAP_SYS_MODULE)))

	/*
	 * convert from the global kernel_cap_t to the ulong array to print to
	 * userspace if this is a read.
	 */
	spin_lock(&umh_sysctl_lock);
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++) {
		if (table->data == CAP_BSET)
			cap_array[i] = usermodehelper_bset.cap[i];
		else if (table->data == CAP_PI)
			cap_array[i] = usermodehelper_inheritable.cap[i];
	spin_unlock(&umh_sysctl_lock);

	/*
	 * actually read or write an array of ulongs from userspace. Remember
	 * these are least significant 32 bits first
	 */
	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);

	/*
	 * convert from the sysctl array of ulongs to the kernel_cap_t
	 * internal representation
	 */
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
		new_cap.cap[i] = cap_array[i];

	/*
	 * Drop everything not in the new_cap (but don't add things)
	 */
	spin_lock(&umh_sysctl_lock);
		if (table->data == CAP_BSET)
			usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
		if (table->data == CAP_PI)
			usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
	spin_unlock(&umh_sysctl_lock);

struct ctl_table usermodehelper_table[] = {
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.proc_handler	= proc_cap_handler,

		.procname	= "inheritable",
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.proc_handler	= proc_cap_handler,
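
/*
 * This table is hooked under the "usermodehelper" directory of the kernel
 * sysctl tree (see kernel/sysctl.c), so the bounding-set and inheritable-set
 * knobs normally appear as /proc/sys/kernel/usermodehelper/bset and
 * /proc/sys/kernel/usermodehelper/inheritable. Each holds
 * _KERNEL_CAPABILITY_U32S unsigned longs, and writes can only drop capability
 * bits from the helpers' credentials, never add them (see proc_cap_handler()).
 */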

void __init usermodehelper_init(void)
{
	khelper_wq = create_singlethread_workqueue("khelper");