// SPDX-License-Identifier: GPL-2.0
/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005  Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 * of Berkeley Packet Filters/Linux Socket Filters.
 */

#include <linux/refcount.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/coredump.h>
#include <linux/kmemleak.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seccomp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>

#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
#include <asm/syscall.h>
#endif

#ifdef CONFIG_SECCOMP_FILTER
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>

/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *         get/put helpers should be used when accessing an instance
 *         outside of a lifetime-guarded section. In general, this
 *         is only needed for handling filters shared across tasks.
 * @log: true if all actions except for SECCOMP_RET_ALLOW should be logged
 * @prev: points to a previously installed, or inherited, filter
 * @prog: the BPF program to evaluate
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer. For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited filter.
 * However, multiple filters may share a @prev node, by way of fork(), which
 * results in a unidirectional tree existing in memory. This is similar to
 * how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
	refcount_t usage;
	bool log;
	struct seccomp_filter *prev;
	struct bpf_prog *prog;
};

/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))

/*
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static void populate_seccomp_data(struct seccomp_data *sd)
{
	struct task_struct *task = current;
	struct pt_regs *regs = task_pt_regs(task);
	unsigned long args[6];

	sd->nr = syscall_get_nr(task, regs);
	sd->arch = syscall_get_arch();
	syscall_get_arguments(task, regs, 0, 6, args);
	sd->args[0] = args[0];
	sd->args[1] = args[1];
	sd->args[2] = args[2];
	sd->args[3] = args[3];
	sd->args[4] = args[4];
	sd->args[5] = args[5];
	sd->instruction_pointer = KSTK_EIP(task);
}
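
/*
 * Illustrative sketch (not part of this file's logic): userspace classic
 * BPF filters consume the fields populated above at fixed offsets within
 * struct seccomp_data, e.g.:
 *
 *	struct sock_filter load_nr = BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			offsetof(struct seccomp_data, nr));
 *
 * seccomp_check_filter() below rewrites exactly such BPF_ABS loads so
 * that they read from the seccomp_data buffer rather than an skb.
 */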

/**
 * seccomp_check_filter - verify seccomp filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Takes a previously checked filter (by bpf_check_classic) and
 * redirects all filter code that loads struct sk_buff data
 * and related data through seccomp_bpf_load.  It also
 * enforces length and alignment checking of those loads.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
	int pc;

	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;
		u32 k = ftest->k;

		switch (code) {
		case BPF_LD | BPF_W | BPF_ABS:
			ftest->code = BPF_LDX | BPF_W | BPF_ABS;
			/* 32-bit aligned and not out of bounds. */
			if (k >= sizeof(struct seccomp_data) || k & 3)
				return -EINVAL;
			continue;
		case BPF_LD | BPF_W | BPF_LEN:
			ftest->code = BPF_LD | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		case BPF_LDX | BPF_W | BPF_LEN:
			ftest->code = BPF_LDX | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		/* Explicitly include allowed calls. */
		case BPF_RET | BPF_K:
		case BPF_RET | BPF_A:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
		case BPF_MISC | BPF_TAX:
		case BPF_MISC | BPF_TXA:
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
		case BPF_JMP | BPF_JA:
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			continue;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
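
/*
 * Illustrative sketch (userspace view, not used by the kernel itself): a
 * minimal filter that passes the validation above, since it uses only
 * whitelisted opcodes and a 32-bit aligned, in-bounds BPF_ABS load:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, nr)),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 *
 * A load from a misaligned offset (k & 3) or one past
 * sizeof(struct seccomp_data) would be rejected with -EINVAL.
 */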

/**
 * seccomp_run_filters - evaluates all seccomp filters against @sd
 * @sd: optional seccomp data to be passed to filters
 * @match: stores the struct seccomp_filter that resulted in the return
 *         value, unless the filter returned SECCOMP_RET_ALLOW, in which
 *         case it is left unchanged.
 *
 * Returns valid seccomp BPF response codes.
 */
#define ACTION_ONLY(ret) ((s32)((ret) & (SECCOMP_RET_ACTION_FULL)))
static u32 seccomp_run_filters(const struct seccomp_data *sd,
			       struct seccomp_filter **match)
{
	struct seccomp_data sd_local;
	u32 ret = SECCOMP_RET_ALLOW;
	/* Make sure cross-thread synced filter points somewhere sane. */
	struct seccomp_filter *f =
			READ_ONCE(current->seccomp.filter);

	/* Ensure unexpected behavior doesn't result in failing open. */
	if (unlikely(WARN_ON(f == NULL)))
		return SECCOMP_RET_KILL_PROCESS;

	if (!sd) {
		populate_seccomp_data(&sd_local);
		sd = &sd_local;
	}

	/*
	 * All filters in the list are evaluated and the lowest BPF return
	 * value always takes priority (ignoring the DATA).
	 */
	for (; f; f = f->prev) {
		u32 cur_ret = BPF_PROG_RUN(f->prog, sd);

		if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) {
			ret = cur_ret;
			*match = f;
		}
	}
	return ret;
}
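
/*
 * Worked example of the precedence rule above, using the action values
 * from include/uapi/linux/seccomp.h:
 *
 *	ACTION_ONLY(SECCOMP_RET_ERRNO | EPERM)	== 0x00050000
 *	ACTION_ONLY(SECCOMP_RET_TRACE | 42)	== 0x7ff00000
 *
 * If one attached filter returns the ERRNO action and another returns
 * TRACE for the same syscall, ERRNO wins regardless of attach order.
 * The (s32) cast in ACTION_ONLY() makes SECCOMP_RET_KILL_PROCESS
 * (0x80000000) compare as negative, i.e. the highest priority of all.
 */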
#endif /* CONFIG_SECCOMP_FILTER */

static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
	assert_spin_locked(&current->sighand->siglock);

	if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
		return false;

	return true;
}

void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }

static inline void seccomp_assign_mode(struct task_struct *task,
				       unsigned long seccomp_mode,
				       unsigned long flags)
{
	assert_spin_locked(&task->sighand->siglock);

	task->seccomp.mode = seccomp_mode;
	/*
	 * Make sure TIF_SECCOMP cannot be set before the mode (and
	 * filter) is set.
	 */
	smp_mb__before_atomic();
	/* Assume default seccomp processes want spec flaw mitigation. */
	if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
		arch_seccomp_spec_mitigate(task);
	set_tsk_thread_flag(task, TIF_SECCOMP);
}

#ifdef CONFIG_SECCOMP_FILTER
/* Returns 1 if the parent is an ancestor of the child. */
static int is_ancestor(struct seccomp_filter *parent,
		       struct seccomp_filter *child)
{
	/* NULL is the root ancestor. */
	if (parent == NULL)
		return 1;
	for (; child; child = child->prev)
		if (child == parent)
			return 1;
	return 0;
}
/**
 * seccomp_can_sync_threads: checks if all threads can be synchronized
 *
 * Expects sighand and cred_guard_mutex locks to be held.
 *
 * Returns 0 on success, a negative error code on failure, or the pid of
 * a thread that was either not in the correct seccomp mode or did not
 * have an ancestral seccomp filter.
 */
static inline pid_t seccomp_can_sync_threads(void)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Validate all threads being eligible for synchronization. */
	caller = current;
	for_each_thread(caller, thread) {
		pid_t failed;

		/* Skip current, since it is initiating the sync. */
		if (thread == caller)
			continue;

		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
		    (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
		     is_ancestor(thread->seccomp.filter,
				 caller->seccomp.filter)))
			continue;

		/* Return the first thread that cannot be synchronized. */
		failed = task_pid_vnr(thread);
		/* If the pid cannot be resolved, then return -ESRCH */
		if (unlikely(WARN_ON(failed == 0)))
			failed = -ESRCH;
		return failed;
	}

	return 0;
}

/**
 * seccomp_sync_threads: sets all threads to use current's filter
 *
 * Expects sighand and cred_guard_mutex locks to be held, and for
 * seccomp_can_sync_threads() to have returned success already
 * without dropping the locks.
 */
static inline void seccomp_sync_threads(unsigned long flags)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Synchronize all threads. */
	caller = current;
	for_each_thread(caller, thread) {
		/* Skip current, since it needs no changes. */
		if (thread == caller)
			continue;

		/* Get a task reference for the new leaf node. */
		get_seccomp_filter(caller);
		/*
		 * Drop the task reference to the shared ancestor since
		 * current's path will hold a reference.  (This also
		 * allows a put before the assignment.)
		 */
		put_seccomp_filter(thread);
		smp_store_release(&thread->seccomp.filter,
				  caller->seccomp.filter);

		/*
		 * Don't let an unprivileged task work around
		 * the no_new_privs restriction by creating
		 * a thread that sets it up, enters seccomp,
		 * then dies.
		 */
		if (task_no_new_privs(caller))
			task_set_no_new_privs(thread);

		/*
		 * Opt the other thread into seccomp if needed.
		 * As threads are considered to be trust-realm
		 * equivalent (see ptrace_may_access), it is safe to
		 * allow one thread to transition the other.
		 */
		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
					    flags);
	}
}
/**
 * seccomp_prepare_filter: Prepares a seccomp filter for use.
 * @fprog: BPF program to install
 *
 * Returns filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
	struct seccomp_filter *sfilter;
	int ret;
	const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);

	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
		return ERR_PTR(-EINVAL);

	BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));

	/*
	 * Installing a seccomp filter requires that the task has
	 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
	 * This avoids scenarios where unprivileged tasks can affect the
	 * behavior of privileged children.
	 */
	if (!task_no_new_privs(current) &&
	    security_capable_noaudit(current_cred(), current_user_ns(),
				     CAP_SYS_ADMIN) != 0)
		return ERR_PTR(-EACCES);

	/* Allocate a new seccomp_filter */
	sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
	if (!sfilter)
		return ERR_PTR(-ENOMEM);

	ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
					seccomp_check_filter, save_orig);
	if (ret < 0) {
		kfree(sfilter);
		return ERR_PTR(ret);
	}

	refcount_set(&sfilter->usage, 1);

	return sfilter;
}
/**
 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns the prepared filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *
seccomp_prepare_user_filter(const char __user *user_filter)
{
	struct sock_fprog fprog;
	struct seccomp_filter *filter = ERR_PTR(-EFAULT);

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sock_fprog fprog32;

		if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
			goto out;
		fprog.len = fprog32.len;
		fprog.filter = compat_ptr(fprog32.filter);
	} else /* falls through to the if below. */
#endif
	if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
		goto out;
	filter = seccomp_prepare_filter(&fprog);
out:
	return filter;
}
/**
 * seccomp_attach_filter: validate and attach filter
 * @flags: flags to change filter behavior
 * @filter: seccomp filter to add to the current process
 *
 * Caller must be holding current->sighand->siglock.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static long seccomp_attach_filter(unsigned int flags,
				  struct seccomp_filter *filter)
{
	unsigned long total_insns;
	struct seccomp_filter *walker;

	assert_spin_locked(&current->sighand->siglock);

	/* Validate resulting filter length. */
	total_insns = filter->prog->len;
	for (walker = current->seccomp.filter; walker; walker = walker->prev)
		total_insns += walker->prog->len + 4;  /* 4 instr penalty */
	if (total_insns > MAX_INSNS_PER_PATH)
		return -ENOMEM;

	/* If thread sync has been requested, check that it is possible. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
		int ret;

		ret = seccomp_can_sync_threads();
		if (ret)
			return ret;
	}

	/* Set log flag, if present. */
	if (flags & SECCOMP_FILTER_FLAG_LOG)
		filter->log = true;

	/*
	 * If there is an existing filter, make it the prev and don't drop its
	 * task reference.
	 */
	filter->prev = current->seccomp.filter;
	current->seccomp.filter = filter;

	/* Now that the new filter is in place, synchronize to all threads. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		seccomp_sync_threads(flags);

	return 0;
}
static void __get_seccomp_filter(struct seccomp_filter *filter)
{
	/* Reference count is bounded by the number of total processes. */
	refcount_inc(&filter->usage);
}

/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;

	if (!orig)
		return;
	__get_seccomp_filter(orig);
}

static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
	if (filter) {
		bpf_prog_destroy(filter->prog);
		kfree(filter);
	}
}

static void __put_seccomp_filter(struct seccomp_filter *orig)
{
	/* Clean up single-reference branches iteratively. */
	while (orig && refcount_dec_and_test(&orig->usage)) {
		struct seccomp_filter *freeme = orig;

		orig = orig->prev;
		seccomp_filter_free(freeme);
	}
}

/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
	__put_seccomp_filter(tsk->seccomp.filter);
}

static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
{
	clear_siginfo(info);
	info->si_signo = SIGSYS;
	info->si_code = SYS_SECCOMP;
	info->si_call_addr = (void __user *)KSTK_EIP(current);
	info->si_errno = reason;
	info->si_arch = syscall_get_arch();
	info->si_syscall = syscall;
}

/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
	struct siginfo info;

	seccomp_init_siginfo(&info, syscall, reason);
	force_sig_info(SIGSYS, &info, current);
}
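
/*
 * Illustrative userspace sketch: a process relying on SECCOMP_RET_TRAP
 * typically installs a SIGSYS handler with SA_SIGINFO and decodes the
 * fields filled in by seccomp_init_siginfo() above:
 *
 *	void handler(int sig, siginfo_t *info, void *ucontext)
 *	{
 *		if (info->si_code == SYS_SECCOMP) {
 *			int nr = info->si_syscall;    [trapped syscall]
 *			int data = info->si_errno;    [filter's 16-bit data]
 *			then emulate or log the call as desired
 *		}
 *	}
 */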
#endif /* CONFIG_SECCOMP_FILTER */

/* For use with seccomp_actions_logged */
#define SECCOMP_LOG_KILL_PROCESS	(1 << 0)
#define SECCOMP_LOG_KILL_THREAD		(1 << 1)
#define SECCOMP_LOG_TRAP		(1 << 2)
#define SECCOMP_LOG_ERRNO		(1 << 3)
#define SECCOMP_LOG_TRACE		(1 << 4)
#define SECCOMP_LOG_LOG			(1 << 5)
#define SECCOMP_LOG_ALLOW		(1 << 6)

static u32 seccomp_actions_logged = SECCOMP_LOG_KILL_PROCESS |
				    SECCOMP_LOG_KILL_THREAD |
				    SECCOMP_LOG_TRAP |
				    SECCOMP_LOG_ERRNO |
				    SECCOMP_LOG_TRACE |
				    SECCOMP_LOG_LOG;

static inline void seccomp_log(unsigned long syscall, long signr, u32 action,
			       bool requested)
{
	bool log = false;

	switch (action) {
	case SECCOMP_RET_ALLOW:
		break;
	case SECCOMP_RET_TRAP:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_TRAP;
		break;
	case SECCOMP_RET_ERRNO:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_ERRNO;
		break;
	case SECCOMP_RET_TRACE:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_TRACE;
		break;
	case SECCOMP_RET_LOG:
		log = seccomp_actions_logged & SECCOMP_LOG_LOG;
		break;
	case SECCOMP_RET_KILL_THREAD:
		log = seccomp_actions_logged & SECCOMP_LOG_KILL_THREAD;
		break;
	case SECCOMP_RET_KILL_PROCESS:
	default:
		log = seccomp_actions_logged & SECCOMP_LOG_KILL_PROCESS;
	}

	/*
	 * Force an audit message to be emitted when the action is RET_KILL_*,
	 * RET_LOG, or the FILTER_FLAG_LOG bit was set and the action is
	 * allowed to be logged by the admin.
	 */
	if (log)
		return __audit_seccomp(syscall, signr, action);

	/*
	 * Let the audit subsystem decide if the action should be audited based
	 * on whether the current task itself is being audited.
	 */
	return audit_seccomp(syscall, signr, action);
}
/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static const int mode1_syscalls[] = {
	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
	0, /* null terminated */
};

static void __secure_computing_strict(int this_syscall)
{
	const int *syscall_whitelist = mode1_syscalls;
#ifdef CONFIG_COMPAT
	if (in_compat_syscall())
		syscall_whitelist = get_compat_mode1_syscalls();
#endif
	do {
		if (*syscall_whitelist == this_syscall)
			return;
	} while (*++syscall_whitelist);

#ifdef SECCOMP_DEBUG
	dump_stack();
#endif
	seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL_THREAD, true);
	do_exit(SIGKILL);
}

#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
void secure_computing_strict(int this_syscall)
{
	int mode = current->seccomp.mode;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return;

	if (mode == SECCOMP_MODE_DISABLED)
		return;
	else if (mode == SECCOMP_MODE_STRICT)
		__secure_computing_strict(this_syscall);
	else
		BUG();
}
#else

#ifdef CONFIG_SECCOMP_FILTER
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	u32 filter_ret, action;
	struct seccomp_filter *match = NULL;
	int data;

	/*
	 * Make sure that any changes to mode from another thread have
	 * been seen after TIF_SECCOMP was seen.
	 */
	rmb();

	filter_ret = seccomp_run_filters(sd, &match);
	data = filter_ret & SECCOMP_RET_DATA;
	action = filter_ret & SECCOMP_RET_ACTION_FULL;

	switch (action) {
	case SECCOMP_RET_ERRNO:
		/* Set low-order bits as an errno, capped at MAX_ERRNO. */
		if (data > MAX_ERRNO)
			data = MAX_ERRNO;
		syscall_set_return_value(current, task_pt_regs(current),
					 -data, 0);
		goto skip;

	case SECCOMP_RET_TRAP:
		/* Show the handler the original registers. */
		syscall_rollback(current, task_pt_regs(current));
		/* Let the filter pass back 16 bits of data. */
		seccomp_send_sigsys(this_syscall, data);
		goto skip;

	case SECCOMP_RET_TRACE:
		/* We've been put in this state by the ptracer already. */
		if (recheck_after_trace)
			return 0;

		/* ENOSYS these calls if there is no tracer attached. */
		if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
			syscall_set_return_value(current,
						 task_pt_regs(current),
						 -ENOSYS, 0);
			goto skip;
		}

		/* Allow the BPF to provide the event message */
		ptrace_event(PTRACE_EVENT_SECCOMP, data);
		/*
		 * The delivery of a fatal signal during event
		 * notification may silently skip tracer notification,
		 * which could leave us with a potentially unmodified
		 * syscall that the tracer would have liked to have
		 * changed. Since the process is about to die, we just
		 * force the syscall to be skipped and let the signal
		 * kill the process and correctly handle any tracer exit
		 * notifications.
		 */
		if (fatal_signal_pending(current))
			goto skip;
		/* Check if the tracer forced the syscall to be skipped. */
		this_syscall = syscall_get_nr(current, task_pt_regs(current));
		if (this_syscall < 0)
			goto skip;

		/*
		 * Recheck the syscall, since it may have changed. This
		 * intentionally uses a NULL struct seccomp_data to force
		 * a reload of all registers. This does not goto skip since
		 * a skip would have already been reported.
		 */
		if (__seccomp_filter(this_syscall, NULL, true))
			return -1;

		return 0;

	case SECCOMP_RET_LOG:
		seccomp_log(this_syscall, 0, action, true);
		return 0;

	case SECCOMP_RET_ALLOW:
		/*
		 * Note that the "match" filter will always be NULL for
		 * this action since SECCOMP_RET_ALLOW is the starting
		 * state in seccomp_run_filters().
		 */
		return 0;

	case SECCOMP_RET_KILL_THREAD:
	case SECCOMP_RET_KILL_PROCESS:
	default:
		seccomp_log(this_syscall, SIGSYS, action, true);
		/* Dump core only if this is the last remaining thread. */
		if (action == SECCOMP_RET_KILL_PROCESS ||
		    get_nr_threads(current) == 1) {
			siginfo_t info;

			/* Show the original registers in the dump. */
			syscall_rollback(current, task_pt_regs(current));
			/* Trigger a manual coredump since do_exit skips it. */
			seccomp_init_siginfo(&info, this_syscall, data);
			do_coredump(&info);
		}
		if (action == SECCOMP_RET_KILL_PROCESS)
			do_group_exit(SIGSYS);
		else
			do_exit(SIGSYS);
	}

	unreachable();

skip:
	seccomp_log(this_syscall, 0, action, match ? match->log : false);
	return -1;
}
#else
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	BUG();
}
#endif
int __secure_computing(const struct seccomp_data *sd)
{
	int mode = current->seccomp.mode;
	int this_syscall;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return 0;

	this_syscall = sd ? sd->nr :
		syscall_get_nr(current, task_pt_regs(current));

	switch (mode) {
	case SECCOMP_MODE_STRICT:
		__secure_computing_strict(this_syscall);  /* may call do_exit */
		return 0;
	case SECCOMP_MODE_FILTER:
		return __seccomp_filter(this_syscall, sd, false);
	default:
		BUG();
	}
}
#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */

long prctl_get_seccomp(void)
{
	return current->seccomp.mode;
}
/**
 * seccomp_set_mode_strict: internal function for setting strict seccomp
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_strict(void)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
	long ret = -EINVAL;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

#ifdef TIF_NOTSC
	disable_TSC();
#endif
	seccomp_assign_mode(current, seccomp_mode, 0);
	ret = 0;

out:
	spin_unlock_irq(&current->sighand->siglock);

	return ret;
}
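
/*
 * Illustrative userspace sketch: strict mode is entered with either of
 * the following (no filter argument is accepted):
 *
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT);
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_STRICT, 0, NULL);
 *
 * Afterwards only read/write/exit/sigreturn are permitted (see
 * mode1_syscalls above).
 */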

#ifdef CONFIG_SECCOMP_FILTER
/**
 * seccomp_set_mode_filter: internal function for setting seccomp filter
 * @flags: flags to change filter behavior
 * @filter: struct sock_fprog containing filter
 *
 * This function may be called repeatedly to install additional filters.
 * Every filter successfully installed will be evaluated (in reverse order)
 * for each system call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_filter(unsigned int flags,
				    const char __user *filter)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
	struct seccomp_filter *prepared = NULL;
	long ret = -EINVAL;

	/* Validate flags. */
	if (flags & ~SECCOMP_FILTER_FLAG_MASK)
		return -EINVAL;

	/* Prepare the new filter before holding any locks. */
	prepared = seccomp_prepare_user_filter(filter);
	if (IS_ERR(prepared))
		return PTR_ERR(prepared);

	/*
	 * Make sure we cannot change seccomp or nnp state via TSYNC
	 * while another thread is in the middle of calling exec.
	 */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
	    mutex_lock_killable(&current->signal->cred_guard_mutex))
		goto out_free;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

	ret = seccomp_attach_filter(flags, prepared);
	if (ret)
		goto out;
	/* Do not free the successfully attached filter. */
	prepared = NULL;

	seccomp_assign_mode(current, seccomp_mode, flags);
out:
	spin_unlock_irq(&current->sighand->siglock);
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		mutex_unlock(&current->signal->cred_guard_mutex);
out_free:
	seccomp_filter_free(prepared);
	return ret;
}
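
/*
 * Illustrative userspace sketch: an unprivileged task must set
 * no_new_privs before installing a filter, and may pass
 * SECCOMP_FILTER_FLAG_TSYNC to cover all of its threads at once:
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	ret = syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
 *		      SECCOMP_FILTER_FLAG_TSYNC, &prog);
 *
 * A positive return value is the TID of a thread that could not be
 * synchronized (see seccomp_can_sync_threads() above).
 */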
#else
static inline long seccomp_set_mode_filter(unsigned int flags,
					   const char __user *filter)
{
	return -EINVAL;
}
#endif

static long seccomp_get_action_avail(const char __user *uaction)
{
	u32 action;

	if (copy_from_user(&action, uaction, sizeof(action)))
		return -EFAULT;

	switch (action) {
	case SECCOMP_RET_KILL_PROCESS:
	case SECCOMP_RET_KILL_THREAD:
	case SECCOMP_RET_TRAP:
	case SECCOMP_RET_ERRNO:
	case SECCOMP_RET_TRACE:
	case SECCOMP_RET_LOG:
	case SECCOMP_RET_ALLOW:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
		       const char __user *uargs)
{
	switch (op) {
	case SECCOMP_SET_MODE_STRICT:
		if (flags != 0 || uargs != NULL)
			return -EINVAL;
		return seccomp_set_mode_strict();
	case SECCOMP_SET_MODE_FILTER:
		return seccomp_set_mode_filter(flags, uargs);
	case SECCOMP_GET_ACTION_AVAIL:
		if (flags != 0)
			return -EINVAL;

		return seccomp_get_action_avail(uargs);
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
		const char __user *, uargs)
{
	return do_seccomp(op, flags, uargs);
}
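
/*
 * Illustrative userspace sketch: SECCOMP_GET_ACTION_AVAIL lets a filter
 * author probe for newer actions before depending on them:
 *
 *	__u32 action = SECCOMP_RET_LOG;
 *
 *	if (syscall(__NR_seccomp, SECCOMP_GET_ACTION_AVAIL, 0, &action))
 *		the running kernel does not know this action (-EOPNOTSUPP)
 */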

/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
	unsigned int op;
	char __user *uargs;

	switch (seccomp_mode) {
	case SECCOMP_MODE_STRICT:
		op = SECCOMP_SET_MODE_STRICT;
		/*
		 * Setting strict mode through prctl always ignored filter,
		 * so make sure it is always NULL here to pass the internal
		 * check in do_seccomp().
		 */
		uargs = NULL;
		break;
	case SECCOMP_MODE_FILTER:
		op = SECCOMP_SET_MODE_FILTER;
		uargs = filter;
		break;
	default:
		return -EINVAL;
	}

	/* prctl interface doesn't have flags, so they are always zero. */
	return do_seccomp(op, 0, uargs);
}

#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
static struct seccomp_filter *get_nth_filter(struct task_struct *task,
					     unsigned long filter_off)
{
	struct seccomp_filter *orig, *filter;
	unsigned long count;

	/*
	 * Note: this is only correct because the caller should be the (ptrace)
	 * tracer of the task, otherwise lock_task_sighand is needed.
	 */
	spin_lock_irq(&task->sighand->siglock);

	if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
		spin_unlock_irq(&task->sighand->siglock);
		return ERR_PTR(-EINVAL);
	}

	orig = task->seccomp.filter;
	__get_seccomp_filter(orig);
	spin_unlock_irq(&task->sighand->siglock);

	count = 0;
	for (filter = orig; filter; filter = filter->prev)
		count++;

	if (filter_off >= count) {
		filter = ERR_PTR(-ENOENT);
		goto out;
	}

	count -= filter_off;
	for (filter = orig; filter && count > 1; filter = filter->prev)
		count--;

	if (WARN_ON(count != 1 || !filter)) {
		filter = ERR_PTR(-ENOENT);
		goto out;
	}

	__get_seccomp_filter(filter);

out:
	__put_seccomp_filter(orig);
	return filter;
}
long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
			void __user *data)
{
	struct seccomp_filter *filter;
	struct sock_fprog_kern *fprog;
	long ret;

	if (!capable(CAP_SYS_ADMIN) ||
	    current->seccomp.mode != SECCOMP_MODE_DISABLED) {
		return -EACCES;
	}

	filter = get_nth_filter(task, filter_off);
	if (IS_ERR(filter))
		return PTR_ERR(filter);

	fprog = filter->prog->orig_prog;
	if (!fprog) {
		/* This must be a new non-cBPF filter, since we save
		 * every cBPF filter's orig_prog above when
		 * CONFIG_CHECKPOINT_RESTORE is enabled.
		 */
		ret = -EMEDIUMTYPE;
		goto out;
	}

	ret = fprog->len;
	if (!data)
		goto out;

	if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
		ret = -EFAULT;

out:
	__put_seccomp_filter(filter);
	return ret;
}

long seccomp_get_metadata(struct task_struct *task,
			  unsigned long size, void __user *data)
{
	long ret;
	struct seccomp_filter *filter;
	struct seccomp_metadata kmd = {};

	if (!capable(CAP_SYS_ADMIN) ||
	    current->seccomp.mode != SECCOMP_MODE_DISABLED) {
		return -EACCES;
	}

	size = min_t(unsigned long, size, sizeof(kmd));

	if (size < sizeof(kmd.filter_off))
		return -EINVAL;

	if (copy_from_user(&kmd.filter_off, data, sizeof(kmd.filter_off)))
		return -EFAULT;

	filter = get_nth_filter(task, kmd.filter_off);
	if (IS_ERR(filter))
		return PTR_ERR(filter);

	if (filter->log)
		kmd.flags |= SECCOMP_FILTER_FLAG_LOG;

	ret = size;
	if (copy_to_user(data, &kmd, size))
		ret = -EFAULT;

	__put_seccomp_filter(filter);
	return ret;
}
#endif

#ifdef CONFIG_SYSCTL

/* Human readable action names for friendly sysctl interaction */
#define SECCOMP_RET_KILL_PROCESS_NAME	"kill_process"
#define SECCOMP_RET_KILL_THREAD_NAME	"kill_thread"
#define SECCOMP_RET_TRAP_NAME		"trap"
#define SECCOMP_RET_ERRNO_NAME		"errno"
#define SECCOMP_RET_TRACE_NAME		"trace"
#define SECCOMP_RET_LOG_NAME		"log"
#define SECCOMP_RET_ALLOW_NAME		"allow"

static const char seccomp_actions_avail[] =
				SECCOMP_RET_KILL_PROCESS_NAME	" "
				SECCOMP_RET_KILL_THREAD_NAME	" "
				SECCOMP_RET_TRAP_NAME		" "
				SECCOMP_RET_ERRNO_NAME		" "
				SECCOMP_RET_TRACE_NAME		" "
				SECCOMP_RET_LOG_NAME		" "
				SECCOMP_RET_ALLOW_NAME;

struct seccomp_log_name {
	u32		log;
	const char	*name;
};

static const struct seccomp_log_name seccomp_log_names[] = {
	{ SECCOMP_LOG_KILL_PROCESS, SECCOMP_RET_KILL_PROCESS_NAME },
	{ SECCOMP_LOG_KILL_THREAD, SECCOMP_RET_KILL_THREAD_NAME },
	{ SECCOMP_LOG_TRAP, SECCOMP_RET_TRAP_NAME },
	{ SECCOMP_LOG_ERRNO, SECCOMP_RET_ERRNO_NAME },
	{ SECCOMP_LOG_TRACE, SECCOMP_RET_TRACE_NAME },
	{ SECCOMP_LOG_LOG, SECCOMP_RET_LOG_NAME },
	{ SECCOMP_LOG_ALLOW, SECCOMP_RET_ALLOW_NAME },
	{ }
};

static bool seccomp_names_from_actions_logged(char *names, size_t size,
					      u32 actions_logged)
{
	const struct seccomp_log_name *cur;
	bool append_space = false;

	for (cur = seccomp_log_names; cur->name && size; cur++) {
		ssize_t ret;

		if (!(actions_logged & cur->log))
			continue;

		if (append_space) {
			ret = strscpy(names, " ", size);
			if (ret < 0)
				return false;

			names += ret;
			size -= ret;
		} else
			append_space = true;

		ret = strscpy(names, cur->name, size);
		if (ret < 0)
			return false;

		names += ret;
		size -= ret;
	}

	return true;
}

static bool seccomp_action_logged_from_name(u32 *action_logged,
					    const char *name)
{
	const struct seccomp_log_name *cur;

	for (cur = seccomp_log_names; cur->name; cur++) {
		if (!strcmp(cur->name, name)) {
			*action_logged = cur->log;
			return true;
		}
	}

	return false;
}

static bool seccomp_actions_logged_from_names(u32 *actions_logged, char *names)
{
	char *name;

	*actions_logged = 0;
	while ((name = strsep(&names, " ")) && *name) {
		u32 action_logged = 0;

		if (!seccomp_action_logged_from_name(&action_logged, name))
			return false;

		*actions_logged |= action_logged;
	}

	return true;
}

static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos)
{
	char names[sizeof(seccomp_actions_avail)];
	struct ctl_table table;
	int ret;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	memset(names, 0, sizeof(names));

	if (!write) {
		if (!seccomp_names_from_actions_logged(names, sizeof(names),
						       seccomp_actions_logged))
			return -EINVAL;
	}

	table = *ro_table;
	table.data = names;
	table.maxlen = sizeof(names);
	ret = proc_dostring(&table, write, buffer, lenp, ppos);
	if (ret)
		return ret;

	if (write) {
		u32 actions_logged;

		if (!seccomp_actions_logged_from_names(&actions_logged,
						       table.data))
			return -EINVAL;

		if (actions_logged & SECCOMP_LOG_ALLOW)
			return -EINVAL;

		seccomp_actions_logged = actions_logged;
	}

	return 0;
}
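
/*
 * Illustrative usage: the handler above backs
 * /proc/sys/kernel/seccomp/actions_logged, so an administrator can,
 * for example, stop auditing traps with:
 *
 *	echo "kill_process kill_thread errno trace log" > \
 *		/proc/sys/kernel/seccomp/actions_logged
 *
 * Writing "allow" is rejected (see the SECCOMP_LOG_ALLOW check above).
 */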

static struct ctl_path seccomp_sysctl_path[] = {
	{ .procname = "kernel", },
	{ .procname = "seccomp", },
	{ }
};

static struct ctl_table seccomp_sysctl_table[] = {
	{
		.procname	= "actions_avail",
		.data		= (void *) &seccomp_actions_avail,
		.maxlen		= sizeof(seccomp_actions_avail),
		.mode		= 0444,
		.proc_handler	= proc_dostring,
	},
	{
		.procname	= "actions_logged",
		.mode		= 0644,
		.proc_handler	= seccomp_actions_logged_handler,
	},
	{ }
};

static int __init seccomp_sysctl_init(void)
{
	struct ctl_table_header *hdr;

	hdr = register_sysctl_paths(seccomp_sysctl_path, seccomp_sysctl_table);
	if (!hdr)
		pr_warn("seccomp: sysctl registration failed\n");
	else
		kmemleak_not_leak(hdr);

	return 0;
}

device_initcall(seccomp_sysctl_init)

#endif /* CONFIG_SYSCTL */