/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
			   (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			   (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			   (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
static int check_uarg_tail_zero(void __user *uaddr,
				size_t expected_size,
				size_t actual_size)
{
	unsigned char __user *addr;
	unsigned char __user *end;
	unsigned char val;
	int err;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (unlikely(!access_ok(VERIFY_READ, uaddr, actual_size)))
		return -EFAULT;

	if (actual_size <= expected_size)
		return 0;

	addr = uaddr + expected_size;
	end  = uaddr + actual_size;

	for (; addr < end; addr++) {
		err = get_user(val, addr);
		if (err)
			return err;
		if (val)
			return -E2BIG;
	}

	return 0;
}

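/* Look up the ops for attr->map_type in the bpf_map_types table and let
 * them allocate and initialize the new map.
 */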
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map *map;

	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
	    !bpf_map_types[attr->map_type])
		return ERR_PTR(-EINVAL);

	map = bpf_map_types[attr->map_type]->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = bpf_map_types[attr->map_type];
	map->map_type = attr->map_type;
	return map;
}

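/* Allocate map memory: try kmalloc() for sizes up to the costly-order
 * threshold and fall back to vmalloc() for anything larger.
 */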
void *bpf_map_area_alloc(size_t size)
{
	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);
		if (area != NULL)
			return area;
	}

	return __vmalloc(size, GFP_KERNEL | flags, PAGE_KERNEL);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

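/* Check that charging @pages against the current user's RLIMIT_MEMLOCK
 * would not exceed the limit, without actually committing the charge.
 */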
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

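/* Assign the map a cyclic id in [1, INT_MAX) under map_idr_lock; once the
 * id is published, user space can grab a reference via BPF_MAP_GET_FD_BY_ID.
 */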
static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	if (do_idr_lock)
		spin_lock_bh(&map_idr_lock);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);

	if (do_idr_lock)
		spin_unlock_bh(&map_idr_lock);
	else
		__release(&map_idr_lock);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;
	u32 owner_jited = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
		owner_jited = array->owner_jited;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type) {
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
		seq_printf(m, "owner_jited:\t%u\n",
			   owner_jited);
	}
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

#define BPF_MAP_CREATE_LAST_FIELD inner_map_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map;

	err = bpf_map_new_fd(map);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put(map);
		return err;
	}

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
					    bool uref)
{
	int refold;

	refold = __atomic_add_unless(&map->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_map_put(map, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	if (uref)
		atomic_inc(&map->usercnt);

	return map;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

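/* Copy the key in from user space, dispatch the lookup according to the
 * map type (per-cpu and fd-based maps need special handling) and copy the
 * value back out to user space.
 */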
static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		value_size = sizeof(u32);
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

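/* Mirror of map_lookup_elem(): copy key and value in from user space and
 * dispatch the update according to the map type, with bpf_prog_active
 * bumped so a kprobe-attached BPF program cannot re-enter the map
 * operation and deadlock.
 */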
static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

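/* Copy the key in from user space and delete the matching element under
 * the same bpf_prog_active protection as map_update_elem().
 */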
static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

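/* Return the key following @ukey in map iteration order; a NULL key asks
 * for the first key in the map.
 */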
static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (ukey) {
		key = memdup_user(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

static const struct bpf_verifier_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
		return -EINVAL;

	prog->aux->ops = bpf_prog_types[type];
	prog->type = type;
	return 0;
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

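/* Same id scheme as for maps: cyclic ids in [1, INT_MAX) handed out under
 * prog_idr_lock and published to user space via BPF_PROG_GET_FD_BY_ID.
 */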
static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	/* cBPF to eBPF migrations are currently not in the idr store. */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_bh(&prog_idr_lock);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);

	if (do_idr_lock)
		spin_unlock_bh(&prog_idr_lock);
	else
		__release(&prog_idr_lock);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);
		bpf_prog_kallsyms_del(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

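/* Bump the program refcnt by @i, failing with -EBUSY once BPF_MAX_REFCNT
 * would be exceeded.
 */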
struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

/* prog_idr_lock should have been held */
static struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	int refold;

	refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_prog_put(prog, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	return prog;
}

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD prog_flags

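/* Load a program from user space: validate the attributes, copy in the
 * license and instructions, run the verifier, select the runtime (JIT or
 * interpreter) and install an anonymous fd for the result.
 */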
static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
	    type != BPF_PROG_TYPE_CGROUP_SKB &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_alloc_id(prog);
	if (err)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_prog_put() is needed because the above
		 * bpf_prog_alloc_id() has published the prog
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
		 */
		bpf_prog_put(prog);
		return err;
	}

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}

#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

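/* Attach a program of the type implied by attr->attach_type to the cgroup
 * given by attr->target_fd. Requires CAP_NET_ADMIN.
 */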
static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	case BPF_CGROUP_SOCK_OPS:
		ptype = BPF_PROG_TYPE_SOCK_OPS;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_SOCK_OPS:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
		cgroup_put(cgrp);
		break;

	default:
		return -EINVAL;
	}

	return ret;
}

#endif /* CONFIG_CGROUP_BPF */

#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration

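/* Run a loaded program against caller-supplied test input if the program
 * type implements a test_run handler, -ENOTSUPP otherwise.
 */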
static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}

#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id

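/* Shared by BPF_PROG_GET_NEXT_ID and BPF_MAP_GET_NEXT_ID: write back the
 * smallest id in @idr that is strictly greater than attr->start_id.
 */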
static int bpf_obj_get_next_id(const union bpf_attr *attr,
			       union bpf_attr __user *uattr,
			       struct idr *idr,
			       spinlock_t *lock)
{
	u32 next_id = attr->start_id;
	int err = 0;

	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	next_id++;
	spin_lock_bh(lock);
	if (!idr_get_next(idr, &next_id))
		err = -ENOENT;
	spin_unlock_bh(lock);

	if (!err)
		err = put_user(next_id, &uattr->next_id);

	return err;
}

#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id

static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	u32 id = attr->prog_id;
	int fd;

	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&prog_idr_lock);
	prog = idr_find(&prog_idr, id);
	if (prog)
		prog = bpf_prog_inc_not_zero(prog);
	else
		prog = ERR_PTR(-ENOENT);
	spin_unlock_bh(&prog_idr_lock);

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	fd = bpf_prog_new_fd(prog);
	if (fd < 0)
		bpf_prog_put(prog);

	return fd;
}

#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD map_id

static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_map *map;
	u32 id = attr->map_id;
	int fd;

	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&map_idr_lock);
	map = idr_find(&map_idr, id);
	if (map)
		map = bpf_map_inc_not_zero(map, true);
	else
		map = ERR_PTR(-ENOENT);
	spin_unlock_bh(&map_idr_lock);

	if (IS_ERR(map))
		return PTR_ERR(map);

	fd = bpf_map_new_fd(map);
	if (fd < 0)
		bpf_map_put(map);

	return fd;
}

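/* Fill a bpf_prog_info for user space. check_uarg_tail_zero() lets newer
 * user space pass a larger, zero-tailed struct; instruction dumps are
 * limited to CAP_SYS_ADMIN callers.
 */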
static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_prog_info info = {};
	u32 info_len = attr->info.info_len;
	char __user *uinsns;
	u32 ulen;
	int err;

	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = prog->type;
	info.id = prog->aux->id;

	memcpy(info.tag, prog->tag, sizeof(prog->tag));

	if (!capable(CAP_SYS_ADMIN)) {
		info.jited_prog_len = 0;
		info.xlated_prog_len = 0;
		goto done;
	}

	ulen = info.jited_prog_len;
	info.jited_prog_len = prog->jited_len;
	if (info.jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info.jited_prog_insns);
		ulen = min_t(u32, info.jited_prog_len, ulen);
		if (copy_to_user(uinsns, prog->bpf_func, ulen))
			return -EFAULT;
	}

	ulen = info.xlated_prog_len;
	info.xlated_prog_len = bpf_prog_insn_size(prog);
	if (info.xlated_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
		ulen = min_t(u32, info.xlated_prog_len, ulen);
		if (copy_to_user(uinsns, prog->insnsi, ulen))
			return -EFAULT;
	}

done:
	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_map_get_info_by_fd(struct bpf_map *map,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_map_info info = {};
	u32 info_len = attr->info.info_len;
	int err;

	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	info.type = map->map_type;
	info.id = map->id;
	info.key_size = map->key_size;
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	int ufd = attr->info.bpf_fd;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	f = fdget(ufd);
	if (!f.file)
		return -EBADFD;

	if (f.file->f_op == &bpf_prog_fops)
		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
					      uattr);
	else if (f.file->f_op == &bpf_map_fops)
		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
					     uattr);
	else
		err = -EINVAL;

	fdput(f);
	return err;
}

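/* bpf(2) entry point. A hypothetical user-space caller would look roughly
 * like this (illustrative only, not part of this file):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.map_type    = BPF_MAP_TYPE_ARRAY;
 *	attr.key_size    = sizeof(__u32);
 *	attr.value_size  = sizeof(__u64);
 *	attr.max_entries = 64;
 *	fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * Unused attr fields must be zero; CHECK_ATTR() in each command handler
 * rejects anything else.
 */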
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	err = check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}