//SPDX-License-Identifier: GPL-2.0
#include <linux/bpf-cgroup.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bug.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <uapi/linux/btf.h>

DEFINE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

#ifdef CONFIG_CGROUP_BPF

#define LOCAL_STORAGE_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct bpf_cgroup_storage_map {
	struct bpf_map map;

	spinlock_t lock;
	struct bpf_prog *prog;
	struct rb_root root;
	struct list_head list;
};

static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map)
{
	return container_of(map, struct bpf_cgroup_storage_map, map);
}

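/* Storage entries are ordered by cgroup inode id first and by attach
 * type second, matching the field order of struct bpf_cgroup_storage_key.
 */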
static int bpf_cgroup_storage_key_cmp(
	const struct bpf_cgroup_storage_key *key1,
	const struct bpf_cgroup_storage_key *key2)
{
	if (key1->cgroup_inode_id < key2->cgroup_inode_id)
		return -1;
	else if (key1->cgroup_inode_id > key2->cgroup_inode_id)
		return 1;
	else if (key1->attach_type < key2->attach_type)
		return -1;
	else if (key1->attach_type > key2->attach_type)
		return 1;
	return 0;
}

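/* Find the storage for @key in the map's rbtree. When @locked is false
 * the lookup takes and releases map->lock itself; callers that already
 * hold the lock pass @locked == true.
 */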
static struct bpf_cgroup_storage *cgroup_storage_lookup(
	struct bpf_cgroup_storage_map *map, struct bpf_cgroup_storage_key *key,
	bool locked)
{
	struct rb_root *root = &map->root;
	struct rb_node *node;

	if (!locked)
		spin_lock_bh(&map->lock);

	node = root->rb_node;
	while (node) {
		struct bpf_cgroup_storage *storage;

		storage = container_of(node, struct bpf_cgroup_storage, node);

		switch (bpf_cgroup_storage_key_cmp(key, &storage->key)) {
		case -1:
			node = node->rb_left;
			break;
		case 1:
			node = node->rb_right;
			break;
		default:
			if (!locked)
				spin_unlock_bh(&map->lock);
			return storage;
		}
	}

	if (!locked)
		spin_unlock_bh(&map->lock);

	return NULL;
}

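/* Link @storage into the map's rbtree, or return -EEXIST if an entry
 * with the same key is already present. The caller must hold map->lock.
 */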
static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map,
				 struct bpf_cgroup_storage *storage)
{
	struct rb_root *root = &map->root;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct bpf_cgroup_storage *this;

		this = container_of(*new, struct bpf_cgroup_storage, node);

		parent = *new;
		switch (bpf_cgroup_storage_key_cmp(&storage->key, &this->key)) {
		case -1:
			new = &((*new)->rb_left);
			break;
		case 1:
			new = &((*new)->rb_right);
			break;
		default:
			return -EEXIST;
		}
	}

	rb_link_node(&storage->node, parent, new);
	rb_insert_color(&storage->node, root);

	return 0;
}

static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *_key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;

	storage = cgroup_storage_lookup(map, key, false);
	if (!storage)
		return NULL;

	return &READ_ONCE(storage->buf)->data[0];
}

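/* Updates of the shared storage replace the whole buffer: the new copy
 * is swapped in with xchg() and the old one is freed after an RCU grace
 * period, so concurrent bpf program runs keep seeing a consistent value.
 */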
static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
				      void *value, u64 flags)
{
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;
	struct bpf_storage_buffer *new;

	if (flags != BPF_ANY && flags != BPF_EXIST)
		return -EINVAL;

	storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
					key, false);
	if (!storage)
		return -ENOENT;

	new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
			   map->value_size,
			   __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
			   map->numa_node);
	if (!new)
		return -ENOMEM;

	memcpy(&new->data[0], value, map->value_size);

	new = xchg(&storage->buf, new);
	kfree_rcu(new, rcu);

	return 0;
}

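/* Copy the value from every possible CPU into the user-supplied buffer,
 * one round_up(value_size, 8) sized slot per CPU.
 */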
int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *_key,
				   void *value)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(storage->percpu_buf, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *_key,
				     void *value, u64 map_flags)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
		return -EINVAL;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
				value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

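/* Iterate over storage entries in list order: return the first entry
 * when @_key is NULL, otherwise the entry following @_key.
 */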
static int cgroup_storage_get_next_key(struct bpf_map *_map, void *_key,
				       void *_next_key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage_key *next = _next_key;
	struct bpf_cgroup_storage *storage;

	spin_lock_bh(&map->lock);

	if (list_empty(&map->list))
		goto enoent;

	if (key) {
		storage = cgroup_storage_lookup(map, key, true);
		if (!storage)
			goto enoent;

		/* list_next_entry() never returns NULL, so detect the end
		 * of the list with list_is_last() instead.
		 */
		if (list_is_last(&storage->list, &map->list))
			goto enoent;
		storage = list_next_entry(storage, list);
	} else {
		storage = list_first_entry(&map->list,
					   struct bpf_cgroup_storage, list);
	}

	spin_unlock_bh(&map->lock);
	next->attach_type = storage->key.attach_type;
	next->cgroup_inode_id = storage->key.cgroup_inode_id;
	return 0;

enoent:
	spin_unlock_bh(&map->lock);
	return -ENOENT;
}

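/* Allocate the map structure itself. Storage elements are not created
 * here; they are allocated by bpf_cgroup_storage_alloc() when a program
 * using this map is attached to a cgroup.
 */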
static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_cgroup_storage_map *map;

	if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
		return ERR_PTR(-EINVAL);

	if (attr->value_size == 0)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > PAGE_SIZE)
		return ERR_PTR(-E2BIG);

	if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK)
		/* reserved bits should not be used */
		return ERR_PTR(-EINVAL);

	if (attr->max_entries)
		/* max_entries is not used and enforced to be 0 */
		return ERR_PTR(-EINVAL);

	map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map),
			   __GFP_ZERO | GFP_USER, numa_node);
	if (!map)
		return ERR_PTR(-ENOMEM);

	map->map.pages = round_up(sizeof(struct bpf_cgroup_storage_map),
				  PAGE_SIZE) >> PAGE_SHIFT;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&map->map, attr);

	spin_lock_init(&map->lock);
	map->root = RB_ROOT;
	INIT_LIST_HEAD(&map->list);

	return &map->map;
}

static void cgroup_storage_map_free(struct bpf_map *_map)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);

	WARN_ON(!RB_EMPTY_ROOT(&map->root));
	WARN_ON(!list_empty(&map->list));

	kfree(map);
}

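/* Elements live and die with the cgroup attachment they belong to,
 * so userspace is not allowed to delete them directly.
 */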
static int cgroup_storage_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static int cgroup_storage_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	struct btf_member *m;
	u32 offset, size;

	/* Key is expected to be of struct bpf_cgroup_storage_key type,
	 * which is:
	 * struct bpf_cgroup_storage_key {
	 *	__u64	cgroup_inode_id;
	 *	__u32	attach_type;
	 * };
	 */

	/*
	 * Key_type must be a structure with two fields.
	 */
	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ||
	    BTF_INFO_VLEN(key_type->info) != 2)
		return -EINVAL;

	/*
	 * The first field must be a 64 bit integer at 0 offset.
	 */
	m = (struct btf_member *)(key_type + 1);
	size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, cgroup_inode_id);
	if (!btf_member_is_reg_int(btf, key_type, m, 0, size))
		return -EINVAL;

	/*
	 * The second field must be a 32 bit integer at 64 bit offset.
	 */
	m++;
	offset = offsetof(struct bpf_cgroup_storage_key, attach_type);
	size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, attach_type);
	if (!btf_member_is_reg_int(btf, key_type, m, offset, size))
		return -EINVAL;

	return 0;
}

static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *_key,
					 struct seq_file *m)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;
	int cpu;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map_to_storage(map), key, false);
	if (!storage) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		seq_puts(m, ": ");
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  &READ_ONCE(storage->buf)->data[0], m);
		seq_puts(m, "\n");
	} else {
		seq_puts(m, ": {\n");
		for_each_possible_cpu(cpu) {
			seq_printf(m, "\tcpu%d: ", cpu);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  per_cpu_ptr(storage->percpu_buf, cpu),
					  m);
			seq_puts(m, "\n");
		}
		seq_puts(m, "}\n");
	}
	rcu_read_unlock();
}

const struct bpf_map_ops cgroup_storage_map_ops = {
	.map_alloc = cgroup_storage_map_alloc,
	.map_free = cgroup_storage_map_free,
	.map_get_next_key = cgroup_storage_get_next_key,
	.map_lookup_elem = cgroup_storage_lookup_elem,
	.map_update_elem = cgroup_storage_update_elem,
	.map_delete_elem = cgroup_storage_delete_elem,
	.map_check_btf = cgroup_storage_check_btf,
	.map_seq_show_elem = cgroup_storage_seq_show_elem,
};

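/* Bind @_map to @prog. A cgroup storage map can only be used by one
 * program, and a program can use at most one map per storage type,
 * so both directions of the relationship are checked here.
 */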
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	int ret = -EBUSY;

	spin_lock_bh(&map->lock);

	if (map->prog && map->prog != prog)
		goto unlock;
	if (prog->aux->cgroup_storage[stype] &&
	    prog->aux->cgroup_storage[stype] != _map)
		goto unlock;

	map->prog = prog;
	prog->aux->cgroup_storage[stype] = _map;
	ret = 0;
unlock:
	spin_unlock_bh(&map->lock);

	return ret;
}

void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);

	spin_lock_bh(&map->lock);
	if (map->prog == prog) {
		WARN_ON(prog->aux->cgroup_storage[stype] != _map);
		map->prog = NULL;
		prog->aux->cgroup_storage[stype] = NULL;
	}
	spin_unlock_bh(&map->lock);
}

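/* Compute the allocation size of one storage element and, via @pages,
 * the number of pages to charge against the memlock limit.
 */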
static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)
{
	size_t size;

	if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) {
		size = sizeof(struct bpf_storage_buffer) + map->value_size;
		*pages = round_up(sizeof(struct bpf_cgroup_storage) + size,
				  PAGE_SIZE) >> PAGE_SHIFT;
	} else {
		size = map->value_size;
		*pages = round_up(round_up(size, 8) * num_possible_cpus(),
				  PAGE_SIZE) >> PAGE_SHIFT;
	}

	return size;
}

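/* Allocate a storage element for @prog's map of type @stype, charging
 * the memory against the map's memlock limit. Returns NULL if the
 * program doesn't use a map of this type.
 */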
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype)
{
	struct bpf_cgroup_storage *storage;
	struct bpf_map *map;
	gfp_t flags;
	size_t size;
	u32 pages;

	map = prog->aux->cgroup_storage[stype];
	if (!map)
		return NULL;

	size = bpf_cgroup_storage_calculate_size(map, &pages);

	if (bpf_map_charge_memlock(map, pages))
		return ERR_PTR(-EPERM);

	storage = kmalloc_node(sizeof(struct bpf_cgroup_storage),
			       __GFP_ZERO | GFP_USER, map->numa_node);
	if (!storage)
		goto enomem;

	flags = __GFP_ZERO | GFP_USER;

	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		storage->buf = kmalloc_node(size, flags, map->numa_node);
		if (!storage->buf)
			goto enomem;
	} else {
		storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags);
		if (!storage->percpu_buf)
			goto enomem;
	}

	storage->map = (struct bpf_cgroup_storage_map *)map;

	return storage;

enomem:
	bpf_map_uncharge_memlock(map, pages);
	kfree(storage);
	return ERR_PTR(-ENOMEM);
}

static void free_shared_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	kfree(storage->buf);
	kfree(storage);
}

static void free_percpu_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	free_percpu(storage->percpu_buf);
	kfree(storage);
}

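/* Uncharge the memlock pages and free @storage after an RCU grace
 * period, so that in-flight bpf program runs can complete safely.
 */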
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_map *map;
	u32 pages;

	if (!storage)
		return;

	map = &storage->map->map;

	bpf_cgroup_storage_calculate_size(map, &pages);
	bpf_map_uncharge_memlock(map, pages);

	stype = cgroup_storage_type(map);
	if (stype == BPF_CGROUP_STORAGE_SHARED)
		call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu);
	else
		call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu);
}

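/* Publish @storage under the (cgroup, attach type) key so that lookups
 * and key iteration can find it; called when a program gets attached.
 */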
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type)
{
	struct bpf_cgroup_storage_map *map;

	if (!storage)
		return;

	storage->key.attach_type = type;
	storage->key.cgroup_inode_id = cgroup->kn->id.id;

	map = storage->map;

	spin_lock_bh(&map->lock);
	WARN_ON(cgroup_storage_insert(map, storage));
	list_add(&storage->list, &map->list);
	spin_unlock_bh(&map->lock);
}

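/* Remove @storage from the rbtree and the list; the memory itself is
 * released separately by bpf_cgroup_storage_free().
 */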
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage)
{
	struct bpf_cgroup_storage_map *map;
	struct rb_root *root;

	if (!storage)
		return;

	map = storage->map;

	spin_lock_bh(&map->lock);
	root = &map->root;
	rb_erase(&storage->node, root);

	list_del(&storage->list);
	spin_unlock_bh(&map->lock);
}

#endif