/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>

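/* Incremented around map update/delete below so that BPF programs attached
 * to kprobes do not run on this cpu while a map is being modified from the
 * syscall path (see the comment in map_update_elem()).
 */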
DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}

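/* Pre-check only: verify that charging @pages against the current user's
 * RLIMIT_MEMLOCK would stay within the limit, without actually charging
 * anything.
 */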
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

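/* Charge map->pages against the current user's RLIMIT_MEMLOCK; on overrun
 * the charge is rolled back and -EPERM returned.  The user reference is
 * stored in map->user so bpf_map_uncharge_memlock() can undo the charge
 * when the map is freed.
 */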
static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

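/* Drop a user reference; once the last one is gone, prog_array maps are
 * cleared, which drops the program references they hold.
 */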
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	bpf_map_put_with_uref(filp->private_data);
	return 0;
}

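/* Describe the map in /proc/<pid>/fdinfo/<fd> so user space tools can tell
 * what a given bpf-map fd refers to.
 */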
#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

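/* Each BPF_*_LAST_FIELD define below names the last member of 'union
 * bpf_attr' that the corresponding command consumes; CHECK_ATTR() uses it
 * to verify that everything past that member was left zero by user space.
 */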
#define BPF_MAP_CREATE_LAST_FIELD map_flags
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	return err;

free_map:
	map->ops->map_free(map);
	return err;
}

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

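/* Bump the map's refcnt; with @uref also take a user reference, which is
 * dropped again via bpf_map_put_with_uref().
 */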
void bpf_map_inc(struct bpf_map *map, bool uref)
{
	atomic_inc(&map->refcnt);
	if (uref)
		atomic_inc(&map->usercnt);
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* helper to convert user pointers passed inside __aligned_u64 fields */
static void __user *u64_to_ptr(__u64 val)
{
	return (void __user *) (unsigned long) val;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

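/* BPF_MAP_LOOKUP_ELEM: copy the key in from user space, fetch the value
 * (gathered from all cpus for the per-cpu map types, otherwise looked up
 * under rcu_read_lock()) and copy it back out to attr->value.
 */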
static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

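/* BPF_MAP_UPDATE_ELEM: copy key and value in from user space and hand them
 * to the map implementation; the per-cpu map types take a value blob of
 * round_up(value_size, 8) * num_possible_cpus() bytes.
 */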
static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete, otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

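/* BPF_MAP_GET_NEXT_KEY: given attr->key, return the following key in
 * attr->next_key; user space iterates over a map by calling this
 * repeatedly.
 */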
static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *unext_key = u64_to_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}

/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after eBPF program passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when program has bpf_call instructions
			 * and it passed bpf_check(), means that
			 * ops->get_func_proto must have been supplied, check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			if (insn->imm == BPF_FUNC_get_route_realm)
				prog->dst_needed = 1;
			if (insn->imm == BPF_FUNC_get_prandom_u32)
				bpf_user_rnd_init_once();
			if (insn->imm == BPF_FUNC_tail_call) {
				/* mark bpf_tail_call as different opcode
				 * to avoid conditional branch in
				 * interpreter for every normal call
				 * and to prevent accidental JITing by
				 * JIT compiler that doesn't support
				 * bpf_tail_call yet
				 */
				insn->imm = 0;
				insn->code |= BPF_X;
				continue;
			}

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have prototype and verifier allowed
			 * programs to call them, must be real in-kernel functions
			 */
			BUG_ON(!fn->func);
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

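/* Same RLIMIT_MEMLOCK accounting as for maps, but charging prog->pages and
 * remembering the user in prog->aux->user.
 */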
static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(prog->pages, &user->locked_vm);
	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(prog->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	atomic_long_sub(prog->pages, &user->locked_vm);
	free_uid(user);
}

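/* Common teardown once the last reference is gone: release the maps the
 * program uses, return its memlock charge and free the program itself.
 */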
static void __prog_put_common(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

/* version of bpf_prog_put() that is called after a grace period */
void bpf_prog_put_rcu(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		call_rcu(&prog->aux->rcu, __prog_put_common);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		__prog_put_common(&prog->aux->rcu);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put_rcu(prog);
	return 0;
}

static const struct file_operations bpf_prog_fops = {
	.release = bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

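/* Counterpart of __bpf_map_get() for program fds: returns the bpf_prog
 * behind @f or an ERR_PTR, dropping the fd reference on error.
 */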
static struct bpf_prog *__bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* called by sockets/tracing/seccomp before attaching program to an event
 * pairs with bpf_prog_put()
 */
struct bpf_prog *bpf_prog_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = __bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;

	atomic_inc(&prog->aux->refcnt);
	fdput(f);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get);

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt >= BPF_MAXINSNS)
		return -EINVAL;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
			   prog->len * sizeof(struct bpf_insn)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	err = bpf_prog_select_runtime(prog);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_ptr(attr->pathname));
}

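/* Entry point of the bpf(2) syscall.  As a rough user-space sketch (field
 * names from include/uapi/linux/bpf.h in this tree; error handling
 * omitted), an array map could be created like this:
 *
 *	union bpf_attr attr;
 *	int map_fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.map_type    = BPF_MAP_TYPE_ARRAY;
 *	attr.key_size    = sizeof(__u32);
 *	attr.value_size  = sizeof(__u64);
 *	attr.max_entries = 64;
 *	map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */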
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}