// SPDX-License-Identifier: GPL-2.0
/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005 Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 * of Berkeley Packet Filters/Linux Socket Filters.
 */
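
/*
 * A minimal userspace sketch of installing a mode 2 filter (illustrative
 * only, not compiled here; the helper name is hypothetical).  It uses only
 * the uapi definitions from <linux/filter.h> and <linux/seccomp.h>; the
 * prctl() call shown enters this file via prctl_set_seccomp() below, and
 * the seccomp(2) syscall takes the equivalent path through do_seccomp().
 *
 *	#include <linux/filter.h>
 *	#include <linux/seccomp.h>
 *	#include <sys/prctl.h>
 *
 *	int install_allow_all_filter(void)
 *	{
 *		struct sock_filter insns[] = {
 *			BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *		};
 *		struct sock_fprog prog = {
 *			.len = sizeof(insns) / sizeof(insns[0]),
 *			.filter = insns,
 *		};
 *
 *		// Required unless the caller has CAP_SYS_ADMIN; see
 *		// seccomp_prepare_filter().
 *		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
 *			return -1;
 *		return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 *	}
 */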

#include <linux/refcount.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/coredump.h>
#include <linux/kmemleak.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seccomp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>

#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
#include <asm/syscall.h>
#endif

#ifdef CONFIG_SECCOMP_FILTER
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>

/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *         get/put helpers should be used when accessing an instance
 *         outside of a lifetime-guarded section. In general, this
 *         is only needed for handling filters shared across tasks.
 * @log: true if all actions except for SECCOMP_RET_ALLOW should be logged
 * @prev: points to a previously installed, or inherited, filter
 * @prog: the BPF program to evaluate
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer. For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited filter.
 * However, multiple filters may share a @prev node, by way of fork(), which
 * results in a unidirectional tree existing in memory. This is similar to
 * how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
	refcount_t usage;
	bool log;
	struct seccomp_filter *prev;
	struct bpf_prog *prog;
};

/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))

/*
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static void populate_seccomp_data(struct seccomp_data *sd)
{
	struct task_struct *task = current;
	struct pt_regs *regs = task_pt_regs(task);
	unsigned long args[6];

	sd->nr = syscall_get_nr(task, regs);
	sd->arch = syscall_get_arch();
	syscall_get_arguments(task, regs, 0, 6, args);
	sd->args[0] = args[0];
	sd->args[1] = args[1];
	sd->args[2] = args[2];
	sd->args[3] = args[3];
	sd->args[4] = args[4];
	sd->args[5] = args[5];
	sd->instruction_pointer = KSTK_EIP(task);
}

/**
 * seccomp_check_filter - verify seccomp filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Takes a previously checked filter (by bpf_check_classic) and
 * redirects all filter code that loads struct sk_buff data
 * and related data through seccomp_bpf_load. It also
 * enforces length and alignment checking of those loads.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
	int pc;
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;
		u32 k = ftest->k;

		switch (code) {
		case BPF_LD | BPF_W | BPF_ABS:
			ftest->code = BPF_LDX | BPF_W | BPF_ABS;
			/* 32-bit aligned and not out of bounds. */
			if (k >= sizeof(struct seccomp_data) || k & 3)
				return -EINVAL;
			continue;
		case BPF_LD | BPF_W | BPF_LEN:
			ftest->code = BPF_LD | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		case BPF_LDX | BPF_W | BPF_LEN:
			ftest->code = BPF_LDX | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		/* Explicitly include allowed calls. */
		case BPF_RET | BPF_K:
		case BPF_RET | BPF_A:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
		case BPF_MISC | BPF_TAX:
		case BPF_MISC | BPF_TXA:
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
		case BPF_JMP | BPF_JA:
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			continue;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * seccomp_run_filters - evaluates all seccomp filters against @sd
 * @sd: optional seccomp data to be passed to filters
 * @match: stores struct seccomp_filter that resulted in the return value,
 *         unless filter returned SECCOMP_RET_ALLOW, in which case it will
 *         be unchanged.
 *
 * Returns valid seccomp BPF response codes.
 */
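/*
 * The uapi SECCOMP_RET_* action values are ordered so that, compared as
 * signed 32-bit quantities, a numerically lower action is a more severe one:
 * KILL_PROCESS < KILL_THREAD < TRAP < ERRNO < TRACE < LOG < ALLOW.
 */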
#define ACTION_ONLY(ret) ((s32)((ret) & (SECCOMP_RET_ACTION_FULL)))
static u32 seccomp_run_filters(const struct seccomp_data *sd,
			       struct seccomp_filter **match)
{
	struct seccomp_data sd_local;
	u32 ret = SECCOMP_RET_ALLOW;
	/* Make sure cross-thread synced filter points somewhere sane. */
	struct seccomp_filter *f =
			READ_ONCE(current->seccomp.filter);

	/* Ensure unexpected behavior doesn't result in failing open. */
	if (unlikely(WARN_ON(f == NULL)))
		return SECCOMP_RET_KILL_PROCESS;

	if (!sd) {
		populate_seccomp_data(&sd_local);
		sd = &sd_local;
	}

	/*
	 * All filters in the list are evaluated and the lowest BPF return
	 * value always takes priority (ignoring the DATA).
	 */
	for (; f; f = f->prev) {
		u32 cur_ret = BPF_PROG_RUN(f->prog, sd);

		if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) {
			ret = cur_ret;
			*match = f;
		}
	}
	return ret;
}
#endif /* CONFIG_SECCOMP_FILTER */

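/*
 * Returns true if the current task may transition to @seccomp_mode: the mode
 * is either not yet set or is being re-applied with the same value.  Called
 * with current->sighand->siglock held.
 */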
static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
	assert_spin_locked(&current->sighand->siglock);

	if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
		return false;

	return true;
}

/*
 * If a given speculation mitigation is opt-in (prctl()-controlled),
 * select it, by disabling speculation (enabling mitigation).
 */
static inline void spec_mitigate(struct task_struct *task,
				 unsigned long which)
{
	int state = arch_prctl_spec_ctrl_get(task, which);

	if (state > 0 && (state & PR_SPEC_PRCTL))
		arch_prctl_spec_ctrl_set(task, which, PR_SPEC_FORCE_DISABLE);
}

static inline void seccomp_assign_mode(struct task_struct *task,
				       unsigned long seccomp_mode)
{
	assert_spin_locked(&task->sighand->siglock);

	task->seccomp.mode = seccomp_mode;
	/*
	 * Make sure TIF_SECCOMP cannot be set before the mode (and
	 * filter) is set.
	 */
	smp_mb__before_atomic();
	/* Assume seccomp processes want speculation flaw mitigation. */
	spec_mitigate(task, PR_SPEC_STORE_BYPASS);
	set_tsk_thread_flag(task, TIF_SECCOMP);
}

#ifdef CONFIG_SECCOMP_FILTER
/* Returns 1 if the parent is an ancestor of the child. */
static int is_ancestor(struct seccomp_filter *parent,
		       struct seccomp_filter *child)
{
	/* NULL is the root ancestor. */
	if (parent == NULL)
		return 1;
	for (; child; child = child->prev)
		if (child == parent)
			return 1;
	return 0;
}

/**
 * seccomp_can_sync_threads: checks if all threads can be synchronized
 *
 * Expects sighand and cred_guard_mutex locks to be held.
 *
 * Returns 0 on success, -ve on error, or the pid of a thread which was
 * either not in the correct seccomp mode or did not have an ancestral
 * seccomp filter.
 */
static inline pid_t seccomp_can_sync_threads(void)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Validate all threads being eligible for synchronization. */
	caller = current;
	for_each_thread(caller, thread) {
		pid_t failed;

		/* Skip current, since it is initiating the sync. */
		if (thread == caller)
			continue;

		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
		    (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
		     is_ancestor(thread->seccomp.filter,
				 caller->seccomp.filter)))
			continue;

		/* Return the first thread that cannot be synchronized. */
		failed = task_pid_vnr(thread);
		/* If the pid cannot be resolved, then return -ESRCH */
		if (unlikely(WARN_ON(failed == 0)))
			failed = -ESRCH;
		return failed;
	}

	return 0;
}

/**
 * seccomp_sync_threads: sets all threads to use current's filter
 *
 * Expects sighand and cred_guard_mutex locks to be held, and for
 * seccomp_can_sync_threads() to have returned success already
 * without dropping the locks.
 */
static inline void seccomp_sync_threads(void)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Synchronize all threads. */
	caller = current;
	for_each_thread(caller, thread) {
		/* Skip current, since it needs no changes. */
		if (thread == caller)
			continue;

		/* Get a task reference for the new leaf node. */
		get_seccomp_filter(caller);
		/*
		 * Drop the task reference to the shared ancestor since
		 * current's path will hold a reference. (This also
		 * allows a put before the assignment.)
		 */
		put_seccomp_filter(thread);
		smp_store_release(&thread->seccomp.filter,
				  caller->seccomp.filter);

		/*
		 * Don't let an unprivileged task work around
		 * the no_new_privs restriction by creating
		 * a thread that sets it up, enters seccomp,
		 * then dies.
		 */
		if (task_no_new_privs(caller))
			task_set_no_new_privs(thread);

		/*
		 * Opt the other thread into seccomp if needed.
		 * As threads are considered to be trust-realm
		 * equivalent (see ptrace_may_access), it is safe to
		 * allow one thread to transition the other.
		 */
		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
	}
}

/**
 * seccomp_prepare_filter: Prepares a seccomp filter for use.
 * @fprog: BPF program to install
 *
 * Returns filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
	struct seccomp_filter *sfilter;
	int ret;
	const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);

	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
		return ERR_PTR(-EINVAL);

	BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));

	/*
	 * Installing a seccomp filter requires that the task has
	 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
	 * This avoids scenarios where unprivileged tasks can affect the
	 * behavior of privileged children.
	 */
	if (!task_no_new_privs(current) &&
	    security_capable_noaudit(current_cred(), current_user_ns(),
				     CAP_SYS_ADMIN) != 0)
		return ERR_PTR(-EACCES);

	/* Allocate a new seccomp_filter */
	sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
	if (!sfilter)
		return ERR_PTR(-ENOMEM);

	ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
					seccomp_check_filter, save_orig);
	if (ret < 0) {
		kfree(sfilter);
		return ERR_PTR(ret);
	}

	refcount_set(&sfilter->usage, 1);

	return sfilter;
}

/**
 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns the prepared filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *
seccomp_prepare_user_filter(const char __user *user_filter)
{
	struct sock_fprog fprog;
	struct seccomp_filter *filter = ERR_PTR(-EFAULT);

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sock_fprog fprog32;
		if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
			goto out;
		fprog.len = fprog32.len;
		fprog.filter = compat_ptr(fprog32.filter);
	} else /* falls through to the if below. */
#endif
	if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
		goto out;
	filter = seccomp_prepare_filter(&fprog);
out:
	return filter;
}

/**
 * seccomp_attach_filter: validate and attach filter
 * @flags: flags to change filter behavior
 * @filter: seccomp filter to add to the current process
 *
 * Caller must be holding current->sighand->siglock lock.
 *
 * Returns 0 on success, -ve on error.
 */
static long seccomp_attach_filter(unsigned int flags,
				  struct seccomp_filter *filter)
{
	unsigned long total_insns;
	struct seccomp_filter *walker;

	assert_spin_locked(&current->sighand->siglock);

	/* Validate resulting filter length. */
	total_insns = filter->prog->len;
	for (walker = current->seccomp.filter; walker; walker = walker->prev)
		total_insns += walker->prog->len + 4; /* 4 instr penalty */
	if (total_insns > MAX_INSNS_PER_PATH)
		return -ENOMEM;

	/* If thread sync has been requested, check that it is possible. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
		int ret;

		ret = seccomp_can_sync_threads();
		if (ret)
			return ret;
	}

	/* Set log flag, if present. */
	if (flags & SECCOMP_FILTER_FLAG_LOG)
		filter->log = true;

	/*
	 * If there is an existing filter, make it the prev and don't drop its
	 * task reference.
	 */
	filter->prev = current->seccomp.filter;
	current->seccomp.filter = filter;

	/* Now that the new filter is in place, synchronize to all threads. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		seccomp_sync_threads();

	return 0;
}

static void __get_seccomp_filter(struct seccomp_filter *filter)
{
	/* Reference count is bounded by the number of total processes. */
	refcount_inc(&filter->usage);
}

/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;
	if (!orig)
		return;
	__get_seccomp_filter(orig);
}

static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
	if (filter) {
		bpf_prog_destroy(filter->prog);
		kfree(filter);
	}
}

static void __put_seccomp_filter(struct seccomp_filter *orig)
{
	/* Clean up single-reference branches iteratively. */
	while (orig && refcount_dec_and_test(&orig->usage)) {
		struct seccomp_filter *freeme = orig;
		orig = orig->prev;
		seccomp_filter_free(freeme);
	}
}

/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
	__put_seccomp_filter(tsk->seccomp.filter);
}

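/* Fill in a SIGSYS siginfo as delivered for SECCOMP_RET_TRAP and coredumps. */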
static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
{
	clear_siginfo(info);
	info->si_signo = SIGSYS;
	info->si_code = SYS_SECCOMP;
	info->si_call_addr = (void __user *)KSTK_EIP(current);
	info->si_errno = reason;
	info->si_arch = syscall_get_arch();
	info->si_syscall = syscall;
}

/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
	struct siginfo info;
	seccomp_init_siginfo(&info, syscall, reason);
	force_sig_info(SIGSYS, &info, current);
}
#endif /* CONFIG_SECCOMP_FILTER */

/* For use with seccomp_actions_logged */
#define SECCOMP_LOG_KILL_PROCESS	(1 << 0)
#define SECCOMP_LOG_KILL_THREAD		(1 << 1)
#define SECCOMP_LOG_TRAP		(1 << 2)
#define SECCOMP_LOG_ERRNO		(1 << 3)
#define SECCOMP_LOG_TRACE		(1 << 4)
#define SECCOMP_LOG_LOG			(1 << 5)
#define SECCOMP_LOG_ALLOW		(1 << 6)

static u32 seccomp_actions_logged = SECCOMP_LOG_KILL_PROCESS |
				    SECCOMP_LOG_KILL_THREAD  |
				    SECCOMP_LOG_TRAP  |
				    SECCOMP_LOG_ERRNO |
				    SECCOMP_LOG_TRACE |
				    SECCOMP_LOG_LOG;

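/*
 * Decide whether a triggered action should be logged and hand the record to
 * audit.  @requested reflects the filter's SECCOMP_FILTER_FLAG_LOG bit; the
 * admin's actions_logged sysctl mask gates everything except RET_ALLOW.
 */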
static inline void seccomp_log(unsigned long syscall, long signr, u32 action,
			       bool requested)
{
	bool log = false;

	switch (action) {
	case SECCOMP_RET_ALLOW:
		break;
	case SECCOMP_RET_TRAP:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_TRAP;
		break;
	case SECCOMP_RET_ERRNO:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_ERRNO;
		break;
	case SECCOMP_RET_TRACE:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_TRACE;
		break;
	case SECCOMP_RET_LOG:
		log = seccomp_actions_logged & SECCOMP_LOG_LOG;
		break;
	case SECCOMP_RET_KILL_THREAD:
		log = seccomp_actions_logged & SECCOMP_LOG_KILL_THREAD;
		break;
	case SECCOMP_RET_KILL_PROCESS:
	default:
		log = seccomp_actions_logged & SECCOMP_LOG_KILL_PROCESS;
	}

	/*
	 * Force an audit message to be emitted when the action is RET_KILL_*,
	 * RET_LOG, or the FILTER_FLAG_LOG bit was set and the action is
	 * allowed to be logged by the admin.
	 */
	if (log)
		return __audit_seccomp(syscall, signr, action);

	/*
	 * Let the audit subsystem decide if the action should be audited based
	 * on whether the current task itself is being audited.
	 */
	return audit_seccomp(syscall, signr, action);
}

/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static const int mode1_syscalls[] = {
	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
	0, /* null terminated */
};

static void __secure_computing_strict(int this_syscall)
{
	const int *syscall_whitelist = mode1_syscalls;
#ifdef CONFIG_COMPAT
	if (in_compat_syscall())
		syscall_whitelist = get_compat_mode1_syscalls();
#endif
	do {
		if (*syscall_whitelist == this_syscall)
			return;
	} while (*++syscall_whitelist);

#ifdef SECCOMP_DEBUG
	dump_stack();
#endif
	seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL_THREAD, true);
	do_exit(SIGKILL);
}

#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
void secure_computing_strict(int this_syscall)
{
	int mode = current->seccomp.mode;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return;

	if (mode == SECCOMP_MODE_DISABLED)
		return;
	else if (mode == SECCOMP_MODE_STRICT)
		__secure_computing_strict(this_syscall);
	else
		BUG();
}
#else

#ifdef CONFIG_SECCOMP_FILTER
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	u32 filter_ret, action;
	struct seccomp_filter *match = NULL;
	int data;

	/*
	 * Make sure that any changes to mode from another thread have
	 * been seen after TIF_SECCOMP was seen.
	 */
	rmb();

	filter_ret = seccomp_run_filters(sd, &match);
	data = filter_ret & SECCOMP_RET_DATA;
	action = filter_ret & SECCOMP_RET_ACTION_FULL;

	switch (action) {
	case SECCOMP_RET_ERRNO:
		/* Set low-order bits as an errno, capped at MAX_ERRNO. */
		if (data > MAX_ERRNO)
			data = MAX_ERRNO;
		syscall_set_return_value(current, task_pt_regs(current),
					 -data, 0);
		goto skip;

	case SECCOMP_RET_TRAP:
		/* Show the handler the original registers. */
		syscall_rollback(current, task_pt_regs(current));
		/* Let the filter pass back 16 bits of data. */
		seccomp_send_sigsys(this_syscall, data);
		goto skip;

	case SECCOMP_RET_TRACE:
		/* We've been put in this state by the ptracer already. */
		if (recheck_after_trace)
			return 0;

		/* ENOSYS these calls if there is no tracer attached. */
		if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
			syscall_set_return_value(current,
						 task_pt_regs(current),
						 -ENOSYS, 0);
			goto skip;
		}

		/* Allow the BPF to provide the event message */
		ptrace_event(PTRACE_EVENT_SECCOMP, data);
		/*
		 * The delivery of a fatal signal during event
		 * notification may silently skip tracer notification,
		 * which could leave us with a potentially unmodified
		 * syscall that the tracer would have liked to have
		 * changed. Since the process is about to die, we just
		 * force the syscall to be skipped and let the signal
		 * kill the process and correctly handle any tracer exit
		 * notifications.
		 */
		if (fatal_signal_pending(current))
			goto skip;
		/* Check if the tracer forced the syscall to be skipped. */
		this_syscall = syscall_get_nr(current, task_pt_regs(current));
		if (this_syscall < 0)
			goto skip;

		/*
		 * Recheck the syscall, since it may have changed. This
		 * intentionally uses a NULL struct seccomp_data to force
		 * a reload of all registers. This does not goto skip since
		 * a skip would have already been reported.
		 */
		if (__seccomp_filter(this_syscall, NULL, true))
			return -1;

		return 0;

	case SECCOMP_RET_LOG:
		seccomp_log(this_syscall, 0, action, true);
		return 0;

	case SECCOMP_RET_ALLOW:
		/*
		 * Note that the "match" filter will always be NULL for
		 * this action since SECCOMP_RET_ALLOW is the starting
		 * state in seccomp_run_filters().
		 */
		return 0;

	case SECCOMP_RET_KILL_THREAD:
	case SECCOMP_RET_KILL_PROCESS:
	default:
		seccomp_log(this_syscall, SIGSYS, action, true);
		/* Dump core only if this is the last remaining thread. */
		if (action == SECCOMP_RET_KILL_PROCESS ||
		    get_nr_threads(current) == 1) {
			siginfo_t info;

			/* Show the original registers in the dump. */
			syscall_rollback(current, task_pt_regs(current));
			/* Trigger a manual coredump since do_exit skips it. */
			seccomp_init_siginfo(&info, this_syscall, data);
			do_coredump(&info);
		}
		if (action == SECCOMP_RET_KILL_PROCESS)
			do_group_exit(SIGSYS);
		else
			do_exit(SIGSYS);
	}

	unreachable();

skip:
	seccomp_log(this_syscall, 0, action, match ? match->log : false);
	return -1;
}
#else
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	BUG();
}
#endif

int __secure_computing(const struct seccomp_data *sd)
{
	int mode = current->seccomp.mode;
	int this_syscall;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return 0;

	this_syscall = sd ? sd->nr :
		syscall_get_nr(current, task_pt_regs(current));

	switch (mode) {
	case SECCOMP_MODE_STRICT:
		__secure_computing_strict(this_syscall); /* may call do_exit */
		return 0;
	case SECCOMP_MODE_FILTER:
		return __seccomp_filter(this_syscall, sd, false);
	default:
		BUG();
	}
}
#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */

long prctl_get_seccomp(void)
{
	return current->seccomp.mode;
}

/**
 * seccomp_set_mode_strict: internal function for setting strict seccomp
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_strict(void)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
	long ret = -EINVAL;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

#ifdef TIF_NOTSC
	disable_TSC();
#endif
	seccomp_assign_mode(current, seccomp_mode);
	ret = 0;

out:
	spin_unlock_irq(&current->sighand->siglock);

	return ret;
}

#ifdef CONFIG_SECCOMP_FILTER
/**
 * seccomp_set_mode_filter: internal function for setting seccomp filter
 * @flags: flags to change filter behavior
 * @filter: struct sock_fprog containing filter
 *
 * This function may be called repeatedly to install additional filters.
 * Every filter successfully installed will be evaluated (in reverse order)
 * for each system call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_filter(unsigned int flags,
				    const char __user *filter)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
	struct seccomp_filter *prepared = NULL;
	long ret = -EINVAL;

	/* Validate flags. */
	if (flags & ~SECCOMP_FILTER_FLAG_MASK)
		return -EINVAL;

	/* Prepare the new filter before holding any locks. */
	prepared = seccomp_prepare_user_filter(filter);
	if (IS_ERR(prepared))
		return PTR_ERR(prepared);

	/*
	 * Make sure we cannot change seccomp or nnp state via TSYNC
	 * while another thread is in the middle of calling exec.
	 */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
	    mutex_lock_killable(&current->signal->cred_guard_mutex))
		goto out_free;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

	ret = seccomp_attach_filter(flags, prepared);
	if (ret)
		goto out;
	/* Do not free the successfully attached filter. */
	prepared = NULL;

	seccomp_assign_mode(current, seccomp_mode);
out:
	spin_unlock_irq(&current->sighand->siglock);
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		mutex_unlock(&current->signal->cred_guard_mutex);
out_free:
	seccomp_filter_free(prepared);
	return ret;
}
#else
static inline long seccomp_set_mode_filter(unsigned int flags,
					   const char __user *filter)
{
	return -EINVAL;
}
#endif

static long seccomp_get_action_avail(const char __user *uaction)
{
	u32 action;

	if (copy_from_user(&action, uaction, sizeof(action)))
		return -EFAULT;

	switch (action) {
	case SECCOMP_RET_KILL_PROCESS:
	case SECCOMP_RET_KILL_THREAD:
	case SECCOMP_RET_TRAP:
	case SECCOMP_RET_ERRNO:
	case SECCOMP_RET_TRACE:
	case SECCOMP_RET_LOG:
	case SECCOMP_RET_ALLOW:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
		       const char __user *uargs)
{
	switch (op) {
	case SECCOMP_SET_MODE_STRICT:
		if (flags != 0 || uargs != NULL)
			return -EINVAL;
		return seccomp_set_mode_strict();
	case SECCOMP_SET_MODE_FILTER:
		return seccomp_set_mode_filter(flags, uargs);
	case SECCOMP_GET_ACTION_AVAIL:
		if (flags != 0)
			return -EINVAL;

		return seccomp_get_action_avail(uargs);
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
		const char __user *, uargs)
{
	return do_seccomp(op, flags, uargs);
}

/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
	unsigned int op;
	char __user *uargs;

	switch (seccomp_mode) {
	case SECCOMP_MODE_STRICT:
		op = SECCOMP_SET_MODE_STRICT;
		/*
		 * Setting strict mode through prctl always ignored filter,
		 * so make sure it is always NULL here to pass the internal
		 * check in do_seccomp().
		 */
		uargs = NULL;
		break;
	case SECCOMP_MODE_FILTER:
		op = SECCOMP_SET_MODE_FILTER;
		uargs = filter;
		break;
	default:
		return -EINVAL;
	}

	/* prctl interface doesn't have flags, so they are always zero. */
	return do_seccomp(op, 0, uargs);
}

#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
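/*
 * Walk the task's filter list and take a reference on the filter at position
 * @filter_off, counting from the first-installed filter (offset 0).  The
 * caller must drop the reference with __put_seccomp_filter().
 */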
static struct seccomp_filter *get_nth_filter(struct task_struct *task,
					     unsigned long filter_off)
{
	struct seccomp_filter *orig, *filter;
	unsigned long count;

	/*
	 * Note: this is only correct because the caller should be the (ptrace)
	 * tracer of the task, otherwise lock_task_sighand is needed.
	 */
	spin_lock_irq(&task->sighand->siglock);

	if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
		spin_unlock_irq(&task->sighand->siglock);
		return ERR_PTR(-EINVAL);
	}

	orig = task->seccomp.filter;
	__get_seccomp_filter(orig);
	spin_unlock_irq(&task->sighand->siglock);

	count = 0;
	for (filter = orig; filter; filter = filter->prev)
		count++;

	if (filter_off >= count) {
		filter = ERR_PTR(-ENOENT);
		goto out;
	}

	count -= filter_off;
	for (filter = orig; filter && count > 1; filter = filter->prev)
		count--;

	if (WARN_ON(count != 1 || !filter)) {
		filter = ERR_PTR(-ENOENT);
		goto out;
	}

	__get_seccomp_filter(filter);

out:
	__put_seccomp_filter(orig);
	return filter;
}

long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
			void __user *data)
{
	struct seccomp_filter *filter;
	struct sock_fprog_kern *fprog;
	long ret;

	if (!capable(CAP_SYS_ADMIN) ||
	    current->seccomp.mode != SECCOMP_MODE_DISABLED) {
		return -EACCES;
	}

	filter = get_nth_filter(task, filter_off);
	if (IS_ERR(filter))
		return PTR_ERR(filter);

	fprog = filter->prog->orig_prog;
	if (!fprog) {
		/* This must be a new non-cBPF filter, since we save
		 * every cBPF filter's orig_prog above when
		 * CONFIG_CHECKPOINT_RESTORE is enabled.
		 */
		ret = -EMEDIUMTYPE;
		goto out;
	}

	ret = fprog->len;
	if (!data)
		goto out;

	if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
		ret = -EFAULT;

out:
	__put_seccomp_filter(filter);
	return ret;
}

long seccomp_get_metadata(struct task_struct *task,
			  unsigned long size, void __user *data)
{
	long ret;
	struct seccomp_filter *filter;
	struct seccomp_metadata kmd = {};

	if (!capable(CAP_SYS_ADMIN) ||
	    current->seccomp.mode != SECCOMP_MODE_DISABLED) {
		return -EACCES;
	}

	size = min_t(unsigned long, size, sizeof(kmd));

	if (size < sizeof(kmd.filter_off))
		return -EINVAL;

	if (copy_from_user(&kmd.filter_off, data, sizeof(kmd.filter_off)))
		return -EFAULT;

	filter = get_nth_filter(task, kmd.filter_off);
	if (IS_ERR(filter))
		return PTR_ERR(filter);

	if (filter->log)
		kmd.flags |= SECCOMP_FILTER_FLAG_LOG;

	ret = size;
	if (copy_to_user(data, &kmd, size))
		ret = -EFAULT;

	__put_seccomp_filter(filter);
	return ret;
}
#endif

#ifdef CONFIG_SYSCTL

/* Human readable action names for friendly sysctl interaction */
#define SECCOMP_RET_KILL_PROCESS_NAME	"kill_process"
#define SECCOMP_RET_KILL_THREAD_NAME	"kill_thread"
#define SECCOMP_RET_TRAP_NAME		"trap"
#define SECCOMP_RET_ERRNO_NAME		"errno"
#define SECCOMP_RET_TRACE_NAME		"trace"
#define SECCOMP_RET_LOG_NAME		"log"
#define SECCOMP_RET_ALLOW_NAME		"allow"

static const char seccomp_actions_avail[] =
				SECCOMP_RET_KILL_PROCESS_NAME	" "
				SECCOMP_RET_KILL_THREAD_NAME	" "
				SECCOMP_RET_TRAP_NAME		" "
				SECCOMP_RET_ERRNO_NAME		" "
				SECCOMP_RET_TRACE_NAME		" "
				SECCOMP_RET_LOG_NAME		" "
				SECCOMP_RET_ALLOW_NAME;
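
/*
 * Exposed read-only as /proc/sys/kernel/seccomp/actions_avail (registered
 * below), e.g.:
 *
 *	# cat /proc/sys/kernel/seccomp/actions_avail
 *	kill_process kill_thread trap errno trace log allow
 */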

struct seccomp_log_name {
	u32		log;
	const char	*name;
};

static const struct seccomp_log_name seccomp_log_names[] = {
	{ SECCOMP_LOG_KILL_PROCESS, SECCOMP_RET_KILL_PROCESS_NAME },
	{ SECCOMP_LOG_KILL_THREAD, SECCOMP_RET_KILL_THREAD_NAME },
	{ SECCOMP_LOG_TRAP, SECCOMP_RET_TRAP_NAME },
	{ SECCOMP_LOG_ERRNO, SECCOMP_RET_ERRNO_NAME },
	{ SECCOMP_LOG_TRACE, SECCOMP_RET_TRACE_NAME },
	{ SECCOMP_LOG_LOG, SECCOMP_RET_LOG_NAME },
	{ SECCOMP_LOG_ALLOW, SECCOMP_RET_ALLOW_NAME },
	{ }
};

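/*
 * Render @actions_logged as the space-separated action names used by the
 * actions_logged sysctl, writing at most @size bytes into @names.
 */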
static bool seccomp_names_from_actions_logged(char *names, size_t size,
					      u32 actions_logged)
{
	const struct seccomp_log_name *cur;
	bool append_space = false;

	for (cur = seccomp_log_names; cur->name && size; cur++) {
		ssize_t ret;

		if (!(actions_logged & cur->log))
			continue;

		if (append_space) {
			ret = strscpy(names, " ", size);
			if (ret < 0)
				return false;

			names += ret;
			size -= ret;
		} else
			append_space = true;

		ret = strscpy(names, cur->name, size);
		if (ret < 0)
			return false;

		names += ret;
		size -= ret;
	}

	return true;
}

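/* Translate one action name back into its SECCOMP_LOG_* bit. */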
static bool seccomp_action_logged_from_name(u32 *action_logged,
					    const char *name)
{
	const struct seccomp_log_name *cur;

	for (cur = seccomp_log_names; cur->name; cur++) {
		if (!strcmp(cur->name, name)) {
			*action_logged = cur->log;
			return true;
		}
	}

	return false;
}

static bool seccomp_actions_logged_from_names(u32 *actions_logged, char *names)
{
	char *name;

	*actions_logged = 0;
	while ((name = strsep(&names, " ")) && *name) {
		u32 action_logged = 0;

		if (!seccomp_action_logged_from_name(&action_logged, name))
			return false;

		*actions_logged |= action_logged;
	}

	return true;
}

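/*
 * Sysctl handler for kernel.seccomp.actions_logged: reads render the current
 * mask as names, writes parse names back into a mask.  "allow" may never be
 * set, since SECCOMP_RET_ALLOW is not loggable.
 */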
static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos)
{
	char names[sizeof(seccomp_actions_avail)];
	struct ctl_table table;
	int ret;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	memset(names, 0, sizeof(names));

	if (!write) {
		if (!seccomp_names_from_actions_logged(names, sizeof(names),
						       seccomp_actions_logged))
			return -EINVAL;
	}

	table = *ro_table;
	table.data = names;
	table.maxlen = sizeof(names);
	ret = proc_dostring(&table, write, buffer, lenp, ppos);
	if (ret)
		return ret;

	if (write) {
		u32 actions_logged;

		if (!seccomp_actions_logged_from_names(&actions_logged,
						       table.data))
			return -EINVAL;

		if (actions_logged & SECCOMP_LOG_ALLOW)
			return -EINVAL;

		seccomp_actions_logged = actions_logged;
	}

	return 0;
}

static struct ctl_path seccomp_sysctl_path[] = {
	{ .procname = "kernel", },
	{ .procname = "seccomp", },
	{ }
};

static struct ctl_table seccomp_sysctl_table[] = {
	{
		.procname	= "actions_avail",
		.data		= (void *) &seccomp_actions_avail,
		.maxlen		= sizeof(seccomp_actions_avail),
		.mode		= 0444,
		.proc_handler	= proc_dostring,
	},
	{
		.procname	= "actions_logged",
		.mode		= 0644,
		.proc_handler	= seccomp_actions_logged_handler,
	},
	{ }
};

static int __init seccomp_sysctl_init(void)
{
	struct ctl_table_header *hdr;

	hdr = register_sysctl_paths(seccomp_sysctl_path, seccomp_sysctl_table);
	if (!hdr)
		pr_warn("seccomp: sysctl registration failed\n");
	else
		kmemleak_not_leak(hdr);

	return 0;
}

device_initcall(seccomp_sysctl_init)

#endif /* CONFIG_SYSCTL */