// SPDX-License-Identifier: GPL-2.0
/*
 * queue_stack_maps.c: BPF queue and stack maps
 *
 * Copyright (c) 2018 Politecnico di Torino
 */
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include "percpu_freelist.h"

#define QUEUE_STACK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

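/* The elements live in a circular buffer with one slot left unused:
 * size = max_entries + 1, so head == tail can only mean "empty" and a
 * full map is detected before head catches up with tail. head indexes
 * the next slot to push into, tail the oldest element in the map.
 */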
struct bpf_queue_stack {
	struct bpf_map map;
	raw_spinlock_t lock;
	u32 head, tail;
	u32 size; /* max_entries + 1 */

	char elements[] __aligned(8);
};

static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
{
	return container_of(map, struct bpf_queue_stack, map);
}

static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
{
	return qs->head == qs->tail;
}

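/* Note that size is not a power of two, so wraparound is done with a
 * compare-and-reset rather than a mask or a modulo.
 */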
static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
{
	u32 head = qs->head + 1;

	if (unlikely(head >= qs->size))
		head = 0;

	return head == qs->tail;
}

/* Called from syscall */
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
	if (!bpf_capable())
		return -EPERM;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 0 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

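/* size is widened to u64 before the multiply so a large max_entries
 * cannot overflow the cost computation; the cost is charged against
 * the caller's memory limit (bpf_map_charge_init) before any
 * allocation is attempted.
 */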
static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map_memory mem = {0};
	struct bpf_queue_stack *qs;
	u64 size, queue_size, cost;

	size = (u64) attr->max_entries + 1;
	cost = queue_size = sizeof(*qs) + size * attr->value_size;

	ret = bpf_map_charge_init(&mem, cost);
	if (ret < 0)
		return ERR_PTR(ret);

	qs = bpf_map_area_alloc(queue_size, numa_node);
	if (!qs) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	memset(qs, 0, sizeof(*qs));

	bpf_map_init_from_attr(&qs->map, attr);

	bpf_map_charge_move(&qs->map.memory, &mem);
	qs->size = size;

	raw_spin_lock_init(&qs->lock);

	return &qs->map;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void queue_stack_map_free(struct bpf_map *map)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);

	bpf_map_area_free(qs);
}

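/* Queue (FIFO) pop/peek: reads the oldest element, at tail. The lock
 * is taken with IRQs disabled since push/pop/peek may also run from
 * BPF programs in interrupt context. On an empty map the value buffer
 * is zeroed so the caller never sees stale data.
 */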
static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;
	u32 index;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

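	/* head indexes the next free slot, so head - 1 is the most
	 * recently pushed element. When head == 0 the u32 subtraction
	 * wraps to a value >= size, which the check below folds back
	 * to the last slot, size - 1.
	 */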
	index = qs->head - 1;
	if (unlikely(index >= qs->size))
		index = qs->size - 1;

	ptr = &qs->elements[index * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete)
		qs->head = index;

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

/* Called from syscall or from eBPF program */
static int queue_map_peek_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int stack_map_peek_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int queue_map_pop_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int stack_map_pop_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
				     u64 flags)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long irq_flags;
	int err = 0;
	void *dst;

	/* BPF_EXIST is used to force making room for a new element in case the
	 * map is full
	 */
	bool replace = (flags & BPF_EXIST);

	/* Check supported flags for queue and stack maps */
	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
		return -EINVAL;

	raw_spin_lock_irqsave(&qs->lock, irq_flags);

	if (queue_stack_map_is_full(qs)) {
		if (!replace) {
			err = -E2BIG;
			goto out;
		}
		/* advance tail pointer to overwrite oldest element */
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

	dst = &qs->elements[qs->head * qs->map.value_size];
	memcpy(dst, value, qs->map.value_size);

	if (unlikely(++qs->head >= qs->size))
		qs->head = 0;

out:
	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
	return err;
}

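/* Usage sketch (illustrative, not part of this file). From a BPF
 * program these maps are driven through the generic push/pop/peek
 * helpers; from user space, BPF_MAP_UPDATE_ELEM pushes,
 * BPF_MAP_LOOKUP_ELEM peeks and BPF_MAP_LOOKUP_AND_DELETE_ELEM pops.
 * A libbpf-defined queue map might look like:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_QUEUE);
 *		__uint(max_entries, 16);
 *		__uint(value_size, sizeof(__u32));
 *	} q SEC(".maps");
 *
 *	__u32 v = 42;
 *	bpf_map_push_elem(&q, &v, BPF_ANY);  (BPF_EXIST overwrites the oldest)
 *	bpf_map_peek_elem(&q, &v);
 *	bpf_map_pop_elem(&q, &v);
 *
 * Because these maps are keyless (key_size must be 0), the generic
 * keyed callbacks below exist only to reject such accesses.
 */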
/* Called from syscall or from eBPF program */
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called from syscall */
static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	return -EINVAL;
}

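/* BTF id of struct bpf_queue_stack, resolved by name against vmlinux
 * BTF so introspection tools can see the kernel-side layout of these
 * maps.
 */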
static int queue_map_btf_id;
const struct bpf_map_ops queue_map_ops = {
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = queue_map_pop_elem,
	.map_peek_elem = queue_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_name = "bpf_queue_stack",
	.map_btf_id = &queue_map_btf_id,
};

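/* The stack map shares everything with the queue map except pop/peek,
 * which return the newest element instead of the oldest.
 */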
static int stack_map_btf_id;
const struct bpf_map_ops stack_map_ops = {
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = stack_map_pop_elem,
	.map_peek_elem = stack_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_name = "bpf_queue_stack",
	.map_btf_id = &stack_map_btf_id,
};