/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/vmalloc.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"

struct bucket {
	struct hlist_head head;
	raw_spinlock_t lock;
};

struct bpf_htab {
	struct bpf_map map;
	struct bucket *buckets;
	void *elems;
	union {
		struct pcpu_freelist freelist;
		struct bpf_lru lru;
	};
	void __percpu *extra_elems;
	atomic_t count;	/* number of elements in this hashtable */
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
};

enum extra_elem_state {
	HTAB_NOT_AN_EXTRA_ELEM = 0,
	HTAB_EXTRA_ELEM_FREE,
	HTAB_EXTRA_ELEM_USED
};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_node hash_node;
		struct bpf_htab *htab;
		struct pcpu_freelist_node fnode;
	};
	union {
		struct rcu_head rcu;
		enum extra_elem_state state;
		struct bpf_lru_node lru_node;
	};
	u32 hash;
	char key[0] __aligned(8);
};
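
/* In-memory layout of one element (see the elem_size computation in
 * htab_map_alloc()):
 *
 *	struct htab_elem | key, padded to round_up(key_size, 8) | value
 *
 * For the per-cpu map flavours the value area instead holds a single
 * pointer to per-cpu storage, accessed via htab_elem_set_ptr() and
 * htab_elem_get_ptr() below.
 */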

static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_percpu(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
}

static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (!htab_is_percpu(htab))
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
	}
free_elems:
	vfree(htab->elems);
}

static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
					  u32 hash)
{
	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
	struct htab_elem *l;

	if (node) {
		l = container_of(node, struct htab_elem, lru_node);
		memcpy(l->key, key, htab->map.key_size);
		return l;
	}

	return NULL;
}

static int prealloc_init(struct bpf_htab *htab)
{
	int err = -ENOMEM, i;

	htab->elems = vzalloc(htab->elem_size * htab->map.max_entries);
	if (!htab->elems)
		return -ENOMEM;

	if (!htab_is_percpu(htab))
		goto skip_percpu_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
	}

skip_percpu_elems:
	if (htab_is_lru(htab))
		err = bpf_lru_init(&htab->lru,
				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
				   offsetof(struct htab_elem, hash) -
				   offsetof(struct htab_elem, lru_node),
				   htab_lru_map_delete_node,
				   htab);
	else
		err = pcpu_freelist_init(&htab->freelist);

	if (err)
		goto free_elems;

	if (htab_is_lru(htab))
		bpf_lru_populate(&htab->lru, htab->elems,
				 offsetof(struct htab_elem, lru_node),
				 htab->elem_size, htab->map.max_entries);
	else
		pcpu_freelist_populate(&htab->freelist, htab->elems,
				       htab->elem_size, htab->map.max_entries);

	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}

static void prealloc_destroy(struct bpf_htab *htab)
{
	htab_free_elems(htab);

	if (htab_is_lru(htab))
		bpf_lru_destroy(&htab->lru);
	else
		pcpu_freelist_destroy(&htab->freelist);
}

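/* One spare element per possible cpu.  When htab_map_update_elem() is
 * asked to overwrite an existing key while the map is already full,
 * alloc_htab_elem() falls back to this spare so the update does not
 * fail with -E2BIG; free_htab_elem() hands the spare back.
 */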
static int alloc_extra_elems(struct bpf_htab *htab)
{
	void __percpu *pptr;
	int cpu;

	pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state =
			HTAB_EXTRA_ELEM_FREE;
	}
	htab->extra_elems = pptr;
	return 0;
}

/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu.  percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	struct bpf_htab *htab;
	int err, i;
	u64 cost;

	if (lru && !capable(CAP_SYS_ADMIN))
		/* The LRU implementation is much more complicated than
		 * other maps.  Hence, limit it to CAP_SYS_ADMIN for now.
		 */
		return ERR_PTR(-EPERM);

	if (attr->map_flags & ~(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU))
		/* reserved bits should not be used */
		return ERR_PTR(-EINVAL);

	if (!lru && percpu_lru)
		return ERR_PTR(-EINVAL);

	if (lru && !prealloc)
		return ERR_PTR(-ENOTSUPP);

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	htab->map.map_type = attr->map_type;
	htab->map.key_size = attr->key_size;
	htab->map.value_size = attr->value_size;
	htab->map.max_entries = attr->max_entries;
	htab->map.map_flags = attr->map_flags;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	err = -EINVAL;
	if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
	    htab->map.value_size == 0)
		goto free_htab;

	if (percpu_lru) {
		/* Ensure each cpu's LRU list has >= 1 element.
		 * Since we are at it, make each LRU list have the same
		 * number of elements.
		 */
		htab->map.max_entries = roundup(attr->max_entries,
						num_possible_cpus());
		if (htab->map.max_entries < attr->max_entries)
			htab->map.max_entries = rounddown(attr->max_entries,
							  num_possible_cpus());
	}

	/* hash table size must be power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	err = -E2BIG;
	if (htab->map.key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot be
		 * larger than max stack size
		 */
		goto free_htab;

	if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) -
	    MAX_BPF_STACK - sizeof(struct htab_elem))
		/* if value_size is bigger, the user space won't be able to
		 * access the elements via bpf syscall. This check also makes
		 * sure that the elem_size doesn't overflow and it's
		 * kmalloc-able later in htab_map_update_elem()
		 */
		goto free_htab;

	if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
		/* make sure the size for pcpu_alloc() is reasonable */
		goto free_htab;

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;

	if (percpu)
		cost += (u64) round_up(htab->map.value_size, 8) *
			num_possible_cpus() * htab->map.max_entries;
	else
		cost += (u64) htab->elem_size * num_possible_cpus();

	if (cost >= U32_MAX - PAGE_SIZE)
		/* make sure page count doesn't overflow */
		goto free_htab;

	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(htab->map.pages);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket),
				      GFP_USER | __GFP_NOWARN);

	if (!htab->buckets) {
		htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket));
		if (!htab->buckets)
			goto free_htab;
	}

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_HEAD(&htab->buckets[i].head);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	if (!percpu && !lru) {
		/* lru itself can remove the least used element, so
		 * there is no need for an extra elem during map_update.
		 */
		err = alloc_extra_elems(htab);
		if (err)
			goto free_buckets;
	}

	if (prealloc) {
		err = prealloc_init(htab);
		if (err)
			goto free_extra_elems;
	}

	return &htab->map;

free_extra_elems:
	free_percpu(htab->extra_elems);
free_buckets:
	kvfree(htab->buckets);
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}

static inline u32 htab_map_hash(const void *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}

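/* n_buckets is always a power of two (see htab_map_alloc()), so
 * selecting a bucket is just masking off the low-order bits of the
 * jhash value.
 */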
static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}

static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct htab_elem *l;

	hlist_for_each_entry_rcu(l, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}

/* Called from syscall or from eBPF program */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	/* Must be called with rcu_read_lock. */
	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	l = lookup_elem_raw(head, hash, key, key_size);

	return l;
}

static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}

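/* The LRU flavour of lookup additionally marks the element as recently
 * used, so the LRU list is less likely to pick it for eviction.
 */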
static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return l->key + round_up(map->key_size, 8);
	}

	return NULL;
}

/* It is called from the bpf_lru_list when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
	struct bpf_htab *htab = (struct bpf_htab *)arg;
	struct htab_elem *l, *tgt_l;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;

	tgt_l = container_of(node, struct htab_elem, lru_node);
	b = __select_bucket(htab, tgt_l->hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	hlist_for_each_entry_rcu(l, head, hash_node)
		if (l == tgt_l) {
			hlist_del_rcu(&l->hash_node);
			break;
		}

	raw_spin_unlock_irqrestore(&b->lock, flags);

	return l == tgt_l;
}

/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_elem_raw(head, hash, key, key_size);

	if (!l) {
		i = 0;
		goto find_first_elem;
	}

	/* key was found, get next key in the same bucket */
	next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
				  struct htab_elem, hash_node);

	if (next_l) {
		/* if next elem in this hash list is non-NULL, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					  struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}

static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
	kfree(l);
}

static void htab_elem_free_rcu(struct rcu_head *head)
{
	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
	struct bpf_htab *htab = l->htab;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering while
	 * we're calling kfree, otherwise deadlock is possible if kprobes
	 * are placed somewhere inside of slub
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	htab_elem_free(htab, l);
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
}

static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	if (l->state == HTAB_EXTRA_ELEM_USED) {
		l->state = HTAB_EXTRA_ELEM_FREE;
		return;
	}

	if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
		pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		atomic_dec(&htab->count);
		l->htab = htab;
		call_rcu(&l->rcu, htab_elem_free_rcu);
	}
}

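/* Copy a new value into an element's per-cpu area(s).  With
 * onallcpus == false (update from a BPF program) only the current
 * cpu's slot is written; with onallcpus == true (update via syscall,
 * see bpf_percpu_hash_update()) the caller supplies value_size bytes
 * per possible cpu, each rounded up to 8 bytes, and every slot is
 * written.
 */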
static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	if (!onallcpus) {
		/* copy true value_size bytes */
		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
	} else {
		u32 size = round_up(htab->map.value_size, 8);
		int off = 0, cpu;

		for_each_possible_cpu(cpu) {
			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
					value + off, size);
			off += size;
		}
	}
}

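/* Allocate an element for the given key/value.  Three paths are
 * possible: pre-allocated maps pop an element from the per-cpu
 * freelist, non-preallocated maps kmalloc() a fresh one, and when the
 * map is already full but the key exists (old_elem_exists) the
 * per-cpu extra element is used so that overwriting an existing key
 * cannot fail with -E2BIG.
 */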
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 bool old_elem_exists)
{
	u32 size = htab->map.value_size;
	bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
	struct htab_elem *l_new;
	void __percpu *pptr;
	int err = 0;

	if (prealloc) {
		l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist);
		if (!l_new)
			err = -E2BIG;
	} else {
		if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
			atomic_dec(&htab->count);
			err = -E2BIG;
		} else {
			l_new = kmalloc(htab->elem_size,
					GFP_ATOMIC | __GFP_NOWARN);
			if (!l_new)
				return ERR_PTR(-ENOMEM);
		}
	}

	if (err) {
		if (!old_elem_exists)
			return ERR_PTR(err);

		/* if we're updating the existing element and the hash table
		 * is full, use per-cpu extra elems
		 */
		l_new = this_cpu_ptr(htab->extra_elems);
		if (l_new->state != HTAB_EXTRA_ELEM_FREE)
			return ERR_PTR(-E2BIG);
		l_new->state = HTAB_EXTRA_ELEM_USED;
	} else {
		l_new->state = HTAB_NOT_AN_EXTRA_ELEM;
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		/* round up value_size to 8 bytes */
		size = round_up(size, 8);

		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = __alloc_percpu_gfp(size, 8,
						  GFP_ATOMIC | __GFP_NOWARN);
			if (!pptr) {
				kfree(l_new);
				return ERR_PTR(-ENOMEM);
			}
		}

		pcpu_copy_value(htab, pptr, value, onallcpus);

		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else {
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	}

	l_new->hash = hash;
	return l_new;
}

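/* Semantics of the update flags checked below: BPF_ANY creates or
 * updates, BPF_NOEXIST only creates (-EEXIST if the key is already
 * present), BPF_EXIST only updates (-ENOENT if the key is absent).
 */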
static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && map_flags == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && map_flags == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
				!!l_old);
	if (IS_ERR(l_new)) {
		/* all pre-allocated elements are in use or memory exhausted */
		ret = PTR_ERR(l_new);
		goto err;
	}

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_del_rcu(&l_old->hash_node);
		free_htab_elem(htab, l_old);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

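/* LRU variant of htab_map_update_elem().  The new element is taken
 * from the LRU free list before the bucket lock is held (which may
 * evict an older entry), and a replaced element is pushed back to the
 * LRU instead of being kfree()d.
 */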
static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new, *l_old = NULL;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because getting free nodes from LRU may need
	 * to remove older elements from htab and this removal
	 * operation will need a bucket lock.
	 */
	l_new = prealloc_lru_pop(htab, key, hash);
	if (!l_new)
		return -ENOMEM;
	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		bpf_lru_node_set_ref(&l_new->lru_node);
		hlist_del_rcu(&l_old->hash_node);
	}
	ret = 0;

err:
	raw_spin_unlock_irqrestore(&b->lock, flags);

	if (ret)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	else if (l_old)
		bpf_lru_push_free(&htab->lru, &l_old->lru_node);

	return ret;
}

static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags,
					 bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		l_new = alloc_htab_elem(htab, key, value, key_size,
					hash, true, onallcpus, false);
		if (IS_ERR(l_new)) {
			ret = PTR_ERR(l_new);
			goto err;
		}
		hlist_add_head_rcu(&l_new->hash_node, head);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					     void *value, u64 map_flags,
					     bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because LRU's elem alloc may need
	 * to remove older elem from htab and this removal
	 * operation will need a bucket lock.
	 */
	if (map_flags != BPF_EXIST) {
		l_new = prealloc_lru_pop(htab, key, hash);
		if (!l_new)
			return -ENOMEM;
	}

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		bpf_lru_node_set_ref(&l_old->lru_node);

		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
				value, onallcpus);
		hlist_add_head_rcu(&l_new->hash_node, head);
		l_new = NULL;
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l_new)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	return ret;
}

static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}

static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 map_flags)
{
	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
						 false);
}

/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_del_rcu(&l->hash_node);
		free_htab_elem(htab, l);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_del_rcu(&l->hash_node);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l)
		bpf_lru_push_free(&htab->lru, &l->lru_node);
	return ret;
}

static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_head *head = select_bucket(htab, i);
		struct hlist_node *n;
		struct htab_elem *l;

		hlist_for_each_entry_safe(l, n, head, hash_node) {
			hlist_del_rcu(&l->hash_node);
			if (l->state != HTAB_EXTRA_ELEM_USED)
				htab_elem_free(htab, l);
		}
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete
	 */
	synchronize_rcu();

	/* some of free_htab_elem() callbacks for elements of this map may
	 * not have executed. Wait for them.
	 */
	rcu_barrier();
	if (htab->map.map_flags & BPF_F_NO_PREALLOC)
		delete_all_elements(htab);
	else
		prealloc_destroy(htab);

	free_percpu(htab->extra_elems);
	kvfree(htab->buckets);
	kfree(htab);
}

static const struct bpf_map_ops htab_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

static struct bpf_map_type_list htab_type __read_mostly = {
	.ops = &htab_ops,
	.type = BPF_MAP_TYPE_HASH,
};

static const struct bpf_map_ops htab_lru_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
};

static struct bpf_map_type_list htab_lru_type __read_mostly = {
	.ops = &htab_lru_ops,
	.type = BPF_MAP_TYPE_LRU_HASH,
};

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	}

	return NULL;
}

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	if (htab_is_lru(htab))
		bpf_lru_node_set_ref(&l->lru_node);
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int ret;

	rcu_read_lock();
	if (htab_is_lru(htab))
		ret = __htab_lru_percpu_map_update_elem(map, key, value,
							map_flags, true);
	else
		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
						    true);
	rcu_read_unlock();

	return ret;
}

static const struct bpf_map_ops htab_percpu_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

static struct bpf_map_type_list htab_percpu_type __read_mostly = {
	.ops = &htab_percpu_ops,
	.type = BPF_MAP_TYPE_PERCPU_HASH,
};

static const struct bpf_map_ops htab_lru_percpu_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
	.map_update_elem = htab_lru_percpu_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
};

static struct bpf_map_type_list htab_lru_percpu_type __read_mostly = {
	.ops = &htab_lru_percpu_ops,
	.type = BPF_MAP_TYPE_LRU_PERCPU_HASH,
};

static int __init register_htab_map(void)
{
	bpf_register_map_type(&htab_type);
	bpf_register_map_type(&htab_percpu_type);
	bpf_register_map_type(&htab_lru_type);
	bpf_register_map_type(&htab_lru_percpu_type);
	return 0;
}
late_initcall(register_htab_map);
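
/* For reference, a minimal userspace sketch (not part of this file's
 * build) of creating and updating a BPF_MAP_TYPE_HASH map through the
 * bpf(2) syscall, which ends up in htab_map_alloc() and
 * htab_map_update_elem() above.  The key/value sizes, max_entries and
 * flags below are illustrative assumptions only:
 *
 *	#include <linux/bpf.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		union bpf_attr attr;
 *		uint32_t key = 1, value = 42;
 *		int map_fd;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_type = BPF_MAP_TYPE_HASH;
 *		attr.key_size = sizeof(key);
 *		attr.value_size = sizeof(value);
 *		attr.max_entries = 1024;
 *		attr.map_flags = 0;
 *		map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *		if (map_fd < 0)
 *			return 1;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_fd = map_fd;
 *		attr.key = (uint64_t)(unsigned long)&key;
 *		attr.value = (uint64_t)(unsigned long)&value;
 *		attr.flags = BPF_ANY;
 *		return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 *	}
 */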