/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static int check_uarg_tail_zero(void __user *uaddr,
				size_t expected_size,
				size_t actual_size)
{
	unsigned char __user *addr;
	unsigned char __user *end;
	unsigned char val;
	int err;

	if (actual_size <= expected_size)
		return 0;

	addr = uaddr + expected_size;
	end = uaddr + actual_size;

	for (; addr < end; addr++) {
		err = get_user(val, addr);
		if (err)
			return err;
		if (val)
			return -E2BIG;
	}

	return 0;
}

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map *map;

	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
	    !bpf_map_types[attr->map_type])
		return ERR_PTR(-EINVAL);

	map = bpf_map_types[attr->map_type]->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = bpf_map_types[attr->map_type];
	map->map_type = attr->map_type;
	return map;
}

void *bpf_map_area_alloc(size_t size)
{
	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);
		if (area != NULL)
			return area;
	}

	return __vmalloc(size, GFP_KERNEL | flags, PAGE_KERNEL);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

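/* Check whether charging @pages more against the current user's
 * RLIMIT_MEMLOCK would exceed the limit, without actually charging them.
 */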
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

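/* Hand out user-visible map IDs cyclically from [1, INT_MAX) so freed IDs
 * are not reused right away; GFP_ATOMIC because the IDR lock is held.
 */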
static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

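/* Callers that already hold map_idr_lock pass do_idr_lock == false; the
 * __acquire()/__release() pair merely keeps sparse's lock tracking balanced.
 */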
static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	if (do_idr_lock)
		spin_lock_bh(&map_idr_lock);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);

	if (do_idr_lock)
		spin_unlock_bh(&map_idr_lock);
	else
		__release(&map_idr_lock);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;
	u32 owner_jited = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
		owner_jited = array->owner_jited;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type) {
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
		seq_printf(m, "owner_jited:\t%u\n",
			   owner_jited);
	}
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

#define BPF_MAP_CREATE_LAST_FIELD inner_map_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map;

	err = bpf_map_new_fd(map);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to userspace and userspace may have
		 * refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put(map);
		return err;
	}

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
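/* Takes a reference only if refcnt is not already zero, so a map racing
 * with its final bpf_map_put() is reported as -ENOENT instead of revived.
 */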
static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
					    bool uref)
{
	int refold;

	refold = __atomic_add_unless(&map->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_map_put(map, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	if (uref)
		atomic_inc(&map->usercnt);

	return map;
}

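/* Weak stub used when the stack map implementation is not built in; the
 * real bpf_stackmap_copy() lives in kernel/bpf/stackmap.c.
 */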
int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

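	/* Per-CPU maps copy out one value slot per possible CPU, each
	 * rounded up to 8 bytes; fd-based maps return just a 32-bit ID.
	 */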
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		value_size = sizeof(u32);
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (ukey) {
		key = memdup_user(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

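/* Verifier ops per program type, generated from bpf_types.h; the
 * BPF_MAP_TYPE() entries expand to nothing here, mirroring bpf_map_types
 * above.
 */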
static const struct bpf_verifier_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
		return -EINVAL;

	prog->aux->ops = bpf_prog_types[type];
	prog->type = type;
	return 0;
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

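/* Charge @pages against @user's RLIMIT_MEMLOCK; a NULL @user is simply
 * not accounted.
 */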
int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	/* cBPF to eBPF migrations are currently not in the idr store. */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_bh(&prog_idr_lock);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);

	if (do_idr_lock)
		spin_unlock_bh(&prog_idr_lock);
	else
		__release(&prog_idr_lock);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);
		bpf_prog_kallsyms_del(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

/* prog_idr_lock should have been held */
static struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	int refold;

	refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_prog_put(prog, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	return prog;
}

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD prog_flags

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
	    type != BPF_PROG_TYPE_CGROUP_SKB &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_alloc_id(prog);
	if (err)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_prog_put() is needed because the above
		 * bpf_prog_alloc_id() has published the prog
		 * to userspace and userspace may have
		 * refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
		 */
		bpf_prog_put(prog);
		return err;
	}

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}

#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	case BPF_CGROUP_SOCK_OPS:
		ptype = BPF_PROG_TYPE_SOCK_OPS;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_SOCK_OPS:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
		cgroup_put(cgrp);
		break;

	default:
		return -EINVAL;
	}

	return ret;
}

#endif /* CONFIG_CGROUP_BPF */

#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}

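/* Iterate prog/map IDs in ascending order: user space passes the last ID
 * it saw as start_id and gets back the next live one.
 */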
#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id

static int bpf_obj_get_next_id(const union bpf_attr *attr,
			       union bpf_attr __user *uattr,
			       struct idr *idr,
			       spinlock_t *lock)
{
	u32 next_id = attr->start_id;
	int err = 0;

	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	next_id++;
	spin_lock_bh(lock);
	if (!idr_get_next(idr, &next_id))
		err = -ENOENT;
	spin_unlock_bh(lock);

	if (!err)
		err = put_user(next_id, &uattr->next_id);

	return err;
}

#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id

static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	u32 id = attr->prog_id;
	int fd;

	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&prog_idr_lock);
	prog = idr_find(&prog_idr, id);
	if (prog)
		prog = bpf_prog_inc_not_zero(prog);
	else
		prog = ERR_PTR(-ENOENT);
	spin_unlock_bh(&prog_idr_lock);

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	fd = bpf_prog_new_fd(prog);
	if (fd < 0)
		bpf_prog_put(prog);

	return fd;
}

#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD map_id

static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_map *map;
	u32 id = attr->map_id;
	int fd;

	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&map_idr_lock);
	map = idr_find(&map_idr, id);
	if (map)
		map = bpf_map_inc_not_zero(map, true);
	else
		map = ERR_PTR(-ENOENT);
	spin_unlock_bh(&map_idr_lock);

	if (IS_ERR(map))
		return PTR_ERR(map);

	fd = bpf_map_new_fd(map);
	if (fd < 0)
		bpf_map_put(map);

	return fd;
}

static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_prog_info info = {};
	u32 info_len = attr->info.info_len;
	char __user *uinsns;
	u32 ulen;
	int err;

	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = prog->type;
	info.id = prog->aux->id;

	memcpy(info.tag, prog->tag, sizeof(prog->tag));

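	/* Instruction dumps are restricted to privileged callers. */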
	if (!capable(CAP_SYS_ADMIN)) {
		info.jited_prog_len = 0;
		info.xlated_prog_len = 0;
		goto done;
	}

	ulen = info.jited_prog_len;
	info.jited_prog_len = prog->jited_len;
	if (info.jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info.jited_prog_insns);
		ulen = min_t(u32, info.jited_prog_len, ulen);
		if (copy_to_user(uinsns, prog->bpf_func, ulen))
			return -EFAULT;
	}

	ulen = info.xlated_prog_len;
	info.xlated_prog_len = bpf_prog_insn_size(prog);
	if (info.xlated_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
		ulen = min_t(u32, info.xlated_prog_len, ulen);
		if (copy_to_user(uinsns, prog->insnsi, ulen))
			return -EFAULT;
	}

done:
	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_map_get_info_by_fd(struct bpf_map *map,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_map_info info = {};
	u32 info_len = attr->info.info_len;
	int err;

	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	info.type = map->map_type;
	info.id = map->id;
	info.key_size = map->key_size;
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	int ufd = attr->info.bpf_fd;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	f = fdget(ufd);
	if (!f.file)
		return -EBADFD;

	if (f.file->f_op == &bpf_prog_fops)
		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
					      uattr);
	else if (f.file->f_op == &bpf_map_fops)
		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
					     uattr);
	else
		err = -EINVAL;

	fdput(f);
	return err;
}

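/* Single entry point for all BPF commands; attr is an extensible union
 * whose size user space reports in @size, so old and new binaries can
 * interoperate.
 */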
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	err = check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}