/*
 * umh - the kernel usermode helper
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/binfmts.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/ptrace.h>
#include <linux/async.h>
#include <linux/uaccess.h>
#include <linux/shmem_fs.h>
#include <linux/pipe_fs_i.h>

#include <trace/events/module.h>

#define CAP_BSET	(void *)1
#define CAP_PI		(void *)2

static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);

static void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
	if (info->cleanup)
		(*info->cleanup)(info);
	kfree(info);
}

static void umh_complete(struct subprocess_info *sub_info)
{
	struct completion *comp = xchg(&sub_info->complete, NULL);
	/*
	 * See call_usermodehelper_exec(). If xchg() returns NULL
	 * we own sub_info, the UMH_KILLABLE caller has gone away
	 * or the caller used UMH_NO_WAIT.
	 */
	if (comp)
		complete(comp);
	else
		call_usermodehelper_freeinfo(sub_info);
}

/*
 * This is the task which runs the usermode application
 */
static int call_usermodehelper_exec_async(void *data)
{
	struct subprocess_info *sub_info = data;
	struct cred *new;
	int retval;

	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	spin_unlock_irq(&current->sighand->siglock);

	/*
	 * Our parent (unbound workqueue) runs with elevated scheduling
	 * priority. Avoid propagating that into the userspace child.
	 */
	set_user_nice(current, 0);

	retval = -ENOMEM;
	new = prepare_kernel_cred(current);
	if (!new)
		goto out;

	spin_lock(&umh_sysctl_lock);
	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
	new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
					     new->cap_inheritable);
	spin_unlock(&umh_sysctl_lock);

	if (sub_info->init) {
		retval = sub_info->init(sub_info, new);
		if (retval) {
			abort_creds(new);
			goto out;
		}
	}

	commit_creds(new);

	if (sub_info->file)
		retval = do_execve_file(sub_info->file,
					sub_info->argv, sub_info->envp);
	else
		retval = do_execve(getname_kernel(sub_info->path),
				   (const char __user *const __user *)sub_info->argv,
				   (const char __user *const __user *)sub_info->envp);
out:
	sub_info->retval = retval;
	/*
	 * call_usermodehelper_exec_sync() will call umh_complete
	 * if UMH_WAIT_PROC.
	 */
	if (!(sub_info->wait & UMH_WAIT_PROC))
		umh_complete(sub_info);
	if (!retval)
		return 0;
	do_exit(0);
}

/* Handles UMH_WAIT_PROC.  */
static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
{
	pid_t pid;

	/* If SIGCLD is ignored kernel_wait4 won't populate the status. */
	kernel_sigaction(SIGCHLD, SIG_DFL);
	pid = kernel_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD);
	if (pid < 0) {
		sub_info->retval = pid;
	} else {
		int ret = -ECHILD;
		/*
		 * Normally it is bogus to call wait4() from in-kernel because
		 * wait4() wants to write the exit code to a userspace address.
		 * But call_usermodehelper_exec_sync() always runs as kernel
		 * thread (workqueue) and put_user() to a kernel address works
		 * OK for kernel threads, due to their having an mm_segment_t
		 * which spans the entire address space.
		 *
		 * Thus the __user pointer cast is valid here.
		 */
		kernel_wait4(pid, (int __user *)&ret, 0, NULL);

		/*
		 * If ret is 0, either call_usermodehelper_exec_async failed and
		 * the real error code is already in sub_info->retval or
		 * sub_info->retval is 0 anyway, so don't mess with it then.
		 */
		if (ret)
			sub_info->retval = ret;
	}

	/* Restore default kernel sig handler */
	kernel_sigaction(SIGCHLD, SIG_IGN);

	umh_complete(sub_info);
}

/*
 * We need to create the usermodehelper kernel thread from a task that is
 * affine to an optimized set of CPUs (or nohz housekeeping ones) so that it
 * inherits the widest possible affinity, irrespective of call_usermodehelper()
 * callers with possibly reduced affinity (eg: per-cpu workqueues). We don't
 * want usermodehelper targets to contend for a busy CPU.
 *
 * Unbound workqueues provide such wide affinity and also allow blocking on
 * UMH_WAIT_PROC requests without blocking pending requests (up to some limit).
 *
 * Besides, workqueues provide the privilege level that the caller might not
 * have to perform the usermodehelper request.
 */
static void call_usermodehelper_exec_work(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);

	if (sub_info->wait & UMH_WAIT_PROC) {
		call_usermodehelper_exec_sync(sub_info);
	} else {
		pid_t pid;
		/*
		 * Use CLONE_PARENT to reparent it to kthreadd; we do not
		 * want to pollute current->children, and we need a parent
		 * that always ignores SIGCHLD to ensure auto-reaping.
		 */
		pid = kernel_thread(call_usermodehelper_exec_async, sub_info,
				    CLONE_PARENT | SIGCHLD);
		if (pid < 0) {
			sub_info->retval = pid;
			umh_complete(sub_info);
		} else {
			sub_info->pid = pid;
		}
	}
}

/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_disable() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
 * to become 'false'.
 */
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)

int usermodehelper_read_trylock(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_INTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		if (usermodehelper_disabled == UMH_DISABLED)
			ret = -EAGAIN;

		up_read(&umhelper_sem);

		if (ret)
			break;

		schedule();
		try_to_freeze();

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return ret;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);

long usermodehelper_read_lock_wait(long timeout)
{
	DEFINE_WAIT(wait);

	if (timeout < 0)
		return -EINVAL;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		up_read(&umhelper_sem);

		timeout = schedule_timeout(timeout);
		if (!timeout)
			break;

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return timeout;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);

void usermodehelper_read_unlock(void)
{
	up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);

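/*
 * Illustrative sketch (not part of the upstream file): a caller that must not
 * start a helper while usermodehelper is disabled (e.g. across suspend or
 * hibernation) can bracket the request with the read lock above. The
 * umh_example_* name below is hypothetical.
 */
static int __maybe_unused umh_example_guarded_exec(struct subprocess_info *sub_info)
{
	int ret;

	ret = usermodehelper_read_trylock();
	if (ret) {
		/* -EAGAIN: helpers are disabled right now, drop the request */
		call_usermodehelper_freeinfo(sub_info);
		return ret;
	}

	/* call_usermodehelper_exec() frees sub_info in all cases */
	ret = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);

	usermodehelper_read_unlock();
	return ret;
}
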
/**
 * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Change the value of usermodehelper_disabled (under umhelper_sem locked for
 * writing) and wakeup tasks waiting for it to change.
 */
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	wake_up(&usermodehelper_disabled_waitq);
	up_write(&umhelper_sem);
}

/**
 * __usermodehelper_disable - Prevent new helpers from being started.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
 */
int __usermodehelper_disable(enum umh_disable_depth depth)
{
	long retval;

	if (!depth)
		return -EINVAL;

	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	up_write(&umhelper_sem);

	/*
	 * From now on call_usermodehelper_exec() won't start any new
	 * helpers, so it is sufficient if running_helpers turns out to
	 * be zero at one point (it may be increased later, but that
	 * doesn't matter).
	 */
	retval = wait_event_timeout(running_helpers_waitq,
				    atomic_read(&running_helpers) == 0,
				    RUNNING_HELPERS_TIMEOUT);
	if (retval)
		return 0;

	__usermodehelper_set_disable_depth(UMH_ENABLED);
	return -EAGAIN;
}

static void helper_lock(void)
{
	atomic_inc(&running_helpers);
	smp_mb__after_atomic();
}

static void helper_unlock(void)
{
	if (atomic_dec_and_test(&running_helpers))
		wake_up(&running_helpers_waitq);
}

/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 * @cleanup: a cleanup function
 * @init: an init function
 * @data: arbitrary context sensitive data
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure. This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 *
 * The init function is used to customize the helper process prior to
 * exec. A non-zero return code causes the process to error out, exit,
 * and return the failure to the calling process.
 *
 * The cleanup function is called just before the subprocess_info is
 * about to be freed. This can be used for freeing the argv and envp.
 * The function must be runnable in either a process context or the
 * context in which call_usermodehelper_exec is called.
 */
struct subprocess_info *call_usermodehelper_setup(const char *path, char **argv,
		char **envp, gfp_t gfp_mask,
		int (*init)(struct subprocess_info *info, struct cred *new),
		void (*cleanup)(struct subprocess_info *info),
		void *data)
{
	struct subprocess_info *sub_info;
	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
	if (!sub_info)
		goto out;

	INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);

#ifdef CONFIG_STATIC_USERMODEHELPER
	sub_info->path = CONFIG_STATIC_USERMODEHELPER_PATH;
#else
	sub_info->path = path;
#endif
	sub_info->argv = argv;
	sub_info->envp = envp;

	sub_info->cleanup = cleanup;
	sub_info->init = init;
	sub_info->data = data;
  out:
	return sub_info;
}
EXPORT_SYMBOL(call_usermodehelper_setup);

struct subprocess_info *call_usermodehelper_setup_file(struct file *file,
		int (*init)(struct subprocess_info *info, struct cred *new),
		void (*cleanup)(struct subprocess_info *info), void *data)
{
	struct subprocess_info *sub_info;

	sub_info = kzalloc(sizeof(struct subprocess_info), GFP_KERNEL);
	if (!sub_info)
		return NULL;

	INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);
	sub_info->path = "none";
	sub_info->file = file;
	sub_info->init = init;
	sub_info->cleanup = cleanup;
	sub_info->data = data;
	return sub_info;
}

static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
	struct umh_info *umh_info = info->data;
	struct file *from_umh[2];
	struct file *to_umh[2];
	int err;

	/* create pipe to send data to umh */
	err = create_pipe_files(to_umh, 0);
	if (err)
		return err;
	err = replace_fd(0, to_umh[0], 0);
	fput(to_umh[0]);
	if (err < 0) {
		fput(to_umh[1]);
		return err;
	}

	/* create pipe to receive data from umh */
	err = create_pipe_files(from_umh, 0);
	if (err) {
		fput(to_umh[1]);
		replace_fd(0, NULL, 0);
		return err;
	}
	err = replace_fd(1, from_umh[1], 0);
	fput(from_umh[1]);
	if (err < 0) {
		fput(to_umh[1]);
		replace_fd(0, NULL, 0);
		fput(from_umh[0]);
		return err;
	}

	umh_info->pipe_to_umh = to_umh[1];
	umh_info->pipe_from_umh = from_umh[0];
	return 0;
}

static void umh_save_pid(struct subprocess_info *info)
{
	struct umh_info *umh_info = info->data;

	umh_info->pid = info->pid;
}

/**
 * fork_usermode_blob - fork a blob of bytes as a usermode process
 * @data: a blob of bytes that can be do_execv-ed as a file
 * @len: length of the blob
 * @info: information about the usermode process (shouldn't be NULL)
 *
 * Returns either a negative error or zero on success in executing
 * a blob of bytes as a usermode process. On success,
 * 'struct umh_info *info' is populated with two pipes and the pid
 * of the process. The caller is responsible for checking the health
 * of the user process, killing it via the pid, and closing the
 * pipes when the user process is no longer needed.
 */
int fork_usermode_blob(void *data, size_t len, struct umh_info *info)
{
	struct subprocess_info *sub_info;
	struct file *file;
	ssize_t written;
	loff_t pos = 0;
	int err;

	file = shmem_kernel_file_setup("", len, 0);
	if (IS_ERR(file))
		return PTR_ERR(file);

	written = kernel_write(file, data, len, &pos);
	if (written != len) {
		err = written;
		if (err >= 0)
			err = -ENOMEM;
		goto out;
	}

	err = -ENOMEM;
	sub_info = call_usermodehelper_setup_file(file, umh_pipe_setup,
						  umh_save_pid, info);
	if (!sub_info)
		goto out;

	err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
out:
	fput(file);
	return err;
}
EXPORT_SYMBOL_GPL(fork_usermode_blob);

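/*
 * Illustrative sketch (not part of the upstream file): driving a process
 * started from an embedded blob over the pipes that umh_pipe_setup() wired up
 * to its stdin/stdout. The umh_example_* name and the "hello" request are
 * hypothetical; the blob itself would typically be linked into the kernel
 * image by the caller.
 */
static int __maybe_unused umh_example_run_blob(void *blob, size_t len,
					       struct umh_info *info)
{
	const char req[] = "hello";
	loff_t pos = 0;
	ssize_t n;
	int err;

	err = fork_usermode_blob(blob, len, info);
	if (err)
		return err;

	/*
	 * info->pid now identifies the new process; send it a request on its
	 * stdin pipe. Reading the reply from info->pipe_from_umh and killing
	 * the process by pid are left to the caller.
	 */
	n = kernel_write(info->pipe_to_umh, req, sizeof(req), &pos);
	if (n < 0)
		return n;

	return 0;
}
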
/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        with UMH_NO_WAIT, don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application. The application is started
 * asynchronously if wait is not set, and runs as a child of system workqueues
 * (i.e. it runs with full root capabilities and optimized affinity).
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int retval = 0;

	if (!sub_info->path) {
		call_usermodehelper_freeinfo(sub_info);
		return -EINVAL;
	}
	helper_lock();
	if (usermodehelper_disabled) {
		retval = -EBUSY;
		goto out;
	}

	/*
	 * If there is no binary for us to call, then just return and get out of
	 * here.  This allows us to set STATIC_USERMODEHELPER_PATH to "" and
	 * disable all call_usermodehelper() calls.
	 */
	if (strlen(sub_info->path) == 0)
		goto out;

	/*
	 * Set the completion pointer only if there is a waiter.
	 * This makes it possible to use umh_complete to free
	 * the data structure in case of UMH_NO_WAIT.
	 */
	sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
	sub_info->wait = wait;

	queue_work(system_unbound_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
		goto unlock;

	if (wait & UMH_KILLABLE) {
		retval = wait_for_completion_killable(&done);
		if (!retval)
			goto wait_done;

		/* umh_complete() will see NULL and free sub_info */
		if (xchg(&sub_info->complete, NULL))
			goto unlock;
		/* fallthrough, umh_complete() was already called */
	}

	wait_for_completion(&done);
wait_done:
	retval = sub_info->retval;
out:
	call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}
EXPORT_SYMBOL(call_usermodehelper_exec);

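/*
 * Illustrative sketch (not part of the upstream file): the two-step interface
 * lets the caller pass an init callback that runs in the helper task right
 * before exec, e.g. to install file descriptors (see umh_pipe_setup() above).
 * The helper path, arguments and umh_example_* names are hypothetical.
 */
static int __maybe_unused umh_example_init(struct subprocess_info *info,
					   struct cred *new)
{
	/* Runs in the child, after credentials have been prepared. */
	return 0;
}

static int __maybe_unused umh_example_setup_and_exec(void)
{
	static char *argv[] = { "/sbin/example-helper", "--oneshot", NULL };
	static char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
				NULL };
	struct subprocess_info *info;

	info = call_usermodehelper_setup(argv[0], argv, envp, GFP_KERNEL,
					 umh_example_init, NULL, NULL);
	if (!info)
		return -ENOMEM;

	/* UMH_WAIT_PROC: block until the helper exits and return its status */
	return call_usermodehelper_exec(info, UMH_WAIT_PROC);
}
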
/**
 * call_usermodehelper() - prepare and start a usermode application
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @wait: wait for the application to finish and return status.
 *        with UMH_NO_WAIT, don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * This function is equivalent to calling call_usermodehelper_setup() followed
 * by call_usermodehelper_exec().
 */
int call_usermodehelper(const char *path, char **argv, char **envp, int wait)
{
	struct subprocess_info *info;
	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;

	info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
					 NULL, NULL, NULL);
	if (info == NULL)
		return -ENOMEM;

	return call_usermodehelper_exec(info, wait);
}
EXPORT_SYMBOL(call_usermodehelper);

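/*
 * Illustrative sketch (not part of the upstream file): the one-shot wrapper
 * above covers the common case where no init/cleanup callbacks are needed.
 * The helper path and arguments are hypothetical.
 */
static int __maybe_unused umh_example_oneshot(void)
{
	static char *argv[] = { "/sbin/example-notify", "event", NULL };
	static char *envp[] = { "HOME=/", "PATH=/sbin:/bin", NULL };

	/* UMH_WAIT_EXEC: return once the exec has succeeded (or failed) */
	return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
}
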
static int proc_cap_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
	kernel_cap_t new_cap;
	int err, i;

	if (write && (!capable(CAP_SETPCAP) ||
		      !capable(CAP_SYS_MODULE)))
		return -EPERM;

	/*
	 * convert from the global kernel_cap_t to the ulong array to print to
	 * userspace if this is a read.
	 */
	spin_lock(&umh_sysctl_lock);
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++) {
		if (table->data == CAP_BSET)
			cap_array[i] = usermodehelper_bset.cap[i];
		else if (table->data == CAP_PI)
			cap_array[i] = usermodehelper_inheritable.cap[i];
		else
			BUG();
	}
	spin_unlock(&umh_sysctl_lock);

	t = *table;
	t.data = &cap_array;

	/*
	 * actually read or write an array of ulongs from userspace.  Remember
	 * these are least significant 32 bits first
	 */
	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;

	/*
	 * convert from the sysctl array of ulongs to the kernel_cap_t
	 * internal representation
	 */
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
		new_cap.cap[i] = cap_array[i];

	/*
	 * Drop everything not in the new_cap (but don't add things)
	 */
	if (write) {
		spin_lock(&umh_sysctl_lock);
		if (table->data == CAP_BSET)
			usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
		if (table->data == CAP_PI)
			usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
		spin_unlock(&umh_sysctl_lock);
	}

	return 0;
}

struct ctl_table usermodehelper_table[] = {
	{
		.procname	= "bset",
		.data		= CAP_BSET,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{
		.procname	= "inheritable",
		.data		= CAP_PI,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{ }
};