/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"

#define HTAB_CREATE_FLAG_MASK \
	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE)

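/* Each bucket has its own lock so that updates to different buckets can
 * proceed in parallel; a raw spinlock is used since bpf_map_update_elem()
 * can be called in_irq() (see the update paths below).
 */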
struct bucket {
	struct hlist_nulls_head head;
	raw_spinlock_t lock;
};

struct bpf_htab {
	struct bpf_map map;
	struct bucket *buckets;
	void *elems;
	union {
		struct pcpu_freelist freelist;
		struct bpf_lru lru;
	};
	struct htab_elem *__percpu *extra_elems;
	atomic_t count;	/* number of elements in this hashtable */
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
};

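/* The first union in struct htab_elem overlays three uses of one slot:
 * hash-list linkage while the element is live, the owning htab pointer
 * while an RCU-deferred free is pending, and freelist linkage while the
 * element sits preallocated.  The 'padding' member keeps 'htab' and
 * 'fnode.next' at the offset of hash_node.pprev (enforced by the
 * BUILD_BUG_ON()s in htab_map_alloc()), so writes on those paths never
 * clobber hash_node.next, which lockless lookups may still be chasing.
 */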
/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_nulls_node hash_node;
		struct {
			void *padding;
			union {
				struct bpf_htab *htab;
				struct pcpu_freelist_node fnode;
			};
		};
	};
	union {
		struct rcu_head rcu;
		struct bpf_lru_node lru_node;
	};
	u32 hash;
	char key[0] __aligned(8);
};

static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_percpu(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_prealloc(const struct bpf_htab *htab)
{
	return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
}

static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}

static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
{
	return *(void **)(l->key + roundup(map->key_size, 8));
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
}

static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (!htab_is_percpu(htab))
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
	}
free_elems:
	bpf_map_area_free(htab->elems);
}

static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
					  u32 hash)
{
	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
	struct htab_elem *l;

	if (node) {
		l = container_of(node, struct htab_elem, lru_node);
		memcpy(l->key, key, htab->map.key_size);
		return l;
	}

	return NULL;
}

static int prealloc_init(struct bpf_htab *htab)
{
	u32 num_entries = htab->map.max_entries;
	int err = -ENOMEM, i;

	if (!htab_is_percpu(htab) && !htab_is_lru(htab))
		num_entries += num_possible_cpus();

	htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,
					 htab->map.numa_node);
	if (!htab->elems)
		return -ENOMEM;

	if (!htab_is_percpu(htab))
		goto skip_percpu_elems;

	for (i = 0; i < num_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
	}

skip_percpu_elems:
	if (htab_is_lru(htab))
		err = bpf_lru_init(&htab->lru,
				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
				   offsetof(struct htab_elem, hash) -
				   offsetof(struct htab_elem, lru_node),
				   htab_lru_map_delete_node,
				   htab);
	else
		err = pcpu_freelist_init(&htab->freelist);

	if (err)
		goto free_elems;

	if (htab_is_lru(htab))
		bpf_lru_populate(&htab->lru, htab->elems,
				 offsetof(struct htab_elem, lru_node),
				 htab->elem_size, num_entries);
	else
		pcpu_freelist_populate(&htab->freelist,
				       htab->elems + offsetof(struct htab_elem, fnode),
				       htab->elem_size, num_entries);

	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}

static void prealloc_destroy(struct bpf_htab *htab)
{
	htab_free_elems(htab);

	if (htab_is_lru(htab))
		bpf_lru_destroy(&htab->lru);
	else
		pcpu_freelist_destroy(&htab->freelist);
}

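/* For plain (non-LRU, non-percpu) preallocated maps, one spare element is
 * parked per possible CPU.  alloc_htab_elem() uses the spare to replace an
 * existing entry without a freelist pop/push under the bucket lock; the
 * displaced old element then becomes the new spare.
 */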
static int alloc_extra_elems(struct bpf_htab *htab)
{
	struct htab_elem *__percpu *pptr, *l_new;
	struct pcpu_freelist_node *l;
	int cpu;

	pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
				  GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		l = pcpu_freelist_pop(&htab->freelist);
		/* pop will succeed, since prealloc_init()
		 * preallocated extra num_possible_cpus elements
		 */
		l_new = container_of(l, struct htab_elem, fnode);
		*per_cpu_ptr(pptr, cpu) = l_new;
	}
	htab->extra_elems = pptr;
	return 0;
}

/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu.  percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_htab *htab;
	int err, i;
	u64 cost;

	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
		     offsetof(struct htab_elem, hash_node.pprev));
	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
		     offsetof(struct htab_elem, hash_node.pprev));

	if (lru && !capable(CAP_SYS_ADMIN))
		/* The LRU implementation is much more complicated than
		 * other maps.  Hence, limit to CAP_SYS_ADMIN for now.
		 */
		return ERR_PTR(-EPERM);

	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
		/* reserved bits should not be used */
		return ERR_PTR(-EINVAL);

	if (!lru && percpu_lru)
		return ERR_PTR(-EINVAL);

	if (lru && !prealloc)
		return ERR_PTR(-ENOTSUPP);

	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
		return ERR_PTR(-EINVAL);

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	htab->map.map_type = attr->map_type;
	htab->map.key_size = attr->key_size;
	htab->map.value_size = attr->value_size;
	htab->map.max_entries = attr->max_entries;
	htab->map.map_flags = attr->map_flags;
	htab->map.numa_node = numa_node;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	err = -EINVAL;
	if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
	    htab->map.value_size == 0)
		goto free_htab;

	if (percpu_lru) {
		/* ensure each CPU's LRU list has >= 1 element.
		 * While we are at it, make each LRU list have the same
		 * number of elements.
		 */
		htab->map.max_entries = roundup(attr->max_entries,
						num_possible_cpus());
		if (htab->map.max_entries < attr->max_entries)
			htab->map.max_entries = rounddown(attr->max_entries,
							  num_possible_cpus());
	}

	/* hash table size must be power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	err = -E2BIG;
	if (htab->map.key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot be
		 * larger than max stack size
		 */
		goto free_htab;

	if (htab->map.value_size >= KMALLOC_MAX_SIZE -
	    MAX_BPF_STACK - sizeof(struct htab_elem))
		/* if value_size is bigger, the user space won't be able to
		 * access the elements via bpf syscall. This check also makes
		 * sure that the elem_size doesn't overflow and it's
		 * kmalloc-able later in htab_map_update_elem()
		 */
		goto free_htab;

	if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
		/* make sure the size for pcpu_alloc() is reasonable */
		goto free_htab;

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;

	if (percpu)
		cost += (u64) round_up(htab->map.value_size, 8) *
			num_possible_cpus() * htab->map.max_entries;
	else
		cost += (u64) htab->elem_size * num_possible_cpus();

	if (cost >= U32_MAX - PAGE_SIZE)
		/* make sure page count doesn't overflow */
		goto free_htab;

	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(htab->map.pages);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
					   sizeof(struct bucket),
					   htab->map.numa_node);
	if (!htab->buckets)
		goto free_htab;

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	if (prealloc) {
		err = prealloc_init(htab);
		if (err)
			goto free_buckets;

		if (!percpu && !lru) {
			/* lru itself can remove the least used element, so
			 * there is no need for an extra elem during map_update.
			 */
			err = alloc_extra_elems(htab);
			if (err)
				goto free_prealloc;
		}
	}

	return &htab->map;

free_prealloc:
	prealloc_destroy(htab);
free_buckets:
	bpf_map_area_free(htab->buckets);
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}

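/* Usage sketch (user space, not part of this file): the checks in
 * htab_map_alloc() above correspond one to one to the attributes passed
 * with the BPF_MAP_CREATE command; hypothetical snippet using a raw
 * syscall:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 1024,
 *		.map_flags   = BPF_F_NO_PREALLOC,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */
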
static inline u32 htab_map_hash(const void *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}

/* this lookup function can only be called with bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}

/* can be called without bucket lock. It will repeat the loop in
 * the unlikely event that elements moved from one bucket into another
 * while the linked list is being walked
 */
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
					       u32 hash, void *key,
					       u32 key_size, u32 n_buckets)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

again:
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
		goto again;

	return NULL;
}

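/* Example of the nulls re-check above: with n_buckets == 4, bucket 2's
 * list ends in a nulls marker that encodes the value 2 (set up by
 * INIT_HLIST_NULLS_HEAD() in htab_map_alloc()).  If a walk that started
 * in bucket 2 terminates on a marker whose value is not (hash & 3), the
 * element under the reader was freed and reinserted into a different
 * bucket mid-walk, so the loop restarts from bucket 2's stable head.
 */
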
/* Called from syscall or from eBPF program directly, so
 * arguments have to match bpf_map_lookup_elem() exactly.
 * The return value is adjusted by BPF instructions
 * in htab_map_gen_lookup().
 */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	/* Must be called with rcu_read_lock. */
	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	return l;
}

static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}

/* inline bpf_map_lookup_elem() call.
 * Instead of:
 * bpf_prog
 *   bpf_map_lookup_elem
 *     map->ops->map_lookup_elem
 *       htab_map_lookup_elem
 *         __htab_map_lookup_elem
 * do:
 * bpf_prog
 *   __htab_map_lookup_elem
 */
static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;

	*insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
				offsetof(struct htab_elem, key) +
				round_up(map->key_size, 8));
	return insn - insn_buf;
}

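/* The three instructions emitted above behave roughly like:
 *
 *	R0 = __htab_map_lookup_elem(map, key)
 *	if (R0 == 0) goto +1	// miss: leave NULL in R0
 *	R0 += offsetof(struct htab_elem, key) + round_up(key_size, 8)
 *
 * i.e. on a hit R0 is advanced from the element header to the value,
 * which is what bpf_map_lookup_elem() returns to the program.
 */
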
static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return l->key + round_up(map->key_size, 8);
	}

	return NULL;
}

/* It is called from the bpf_lru_list when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
	struct bpf_htab *htab = (struct bpf_htab *)arg;
	struct htab_elem *l = NULL, *tgt_l;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	unsigned long flags;
	struct bucket *b;

	tgt_l = container_of(node, struct htab_elem, lru_node);
	b = __select_bucket(htab, tgt_l->hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l == tgt_l) {
			hlist_nulls_del_rcu(&l->hash_node);
			break;
		}

	raw_spin_unlock_irqrestore(&b->lock, flags);

	return l == tgt_l;
}

/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i = 0;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	if (!key)
		goto find_first_elem;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	if (!l)
		goto find_first_elem;

	/* key was found, get next key in the same bucket */
	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
					struct htab_elem, hash_node);

	if (next_l) {
		/* if next elem in this hash list is non-zero, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
						struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}

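/* User-space iteration sketch (libbpf-style wrapper names assumed): pass
 * NULL as the current key to start from the first element.
 *
 *	__u32 key, next_key;
 *	void *cur = NULL;
 *
 *	while (bpf_map_get_next_key(map_fd, cur, &next_key) == 0) {
 *		// ... use next_key ...
 *		key = next_key;
 *		cur = &key;
 *	}
 *
 * Under concurrent updates keys may be skipped or returned twice; only a
 * quiescent map yields an exact enumeration.
 */
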
static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
	kfree(l);
}

static void htab_elem_free_rcu(struct rcu_head *head)
{
	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
	struct bpf_htab *htab = l->htab;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering while
	 * we're calling kfree, otherwise deadlock is possible if kprobes
	 * are placed somewhere inside of slub
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	htab_elem_free(htab, l);
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
}

static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	struct bpf_map *map = &htab->map;

	if (map->ops->map_fd_put_ptr) {
		void *ptr = fd_htab_map_get_ptr(map, l);

		map->ops->map_fd_put_ptr(ptr);
	}

	if (htab_is_prealloc(htab)) {
		pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		atomic_dec(&htab->count);
		l->htab = htab;
		call_rcu(&l->rcu, htab_elem_free_rcu);
	}
}

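/* Note on free_htab_elem(): in the prealloc case the element goes straight
 * back onto the freelist with no RCU grace period, so it can be reused and
 * re-hashed into another bucket while lockless readers still hold pointers
 * to it.  That reuse is exactly what the nulls re-check in
 * lookup_nulls_elem_raw() defends against.
 */
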
static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	if (!onallcpus) {
		/* copy true value_size bytes */
		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
	} else {
		u32 size = round_up(htab->map.value_size, 8);
		int off = 0, cpu;

		for_each_possible_cpu(cpu) {
			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
					value + off, size);
			off += size;
		}
	}
}

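/* When onallcpus is set, 'value' is a flat buffer holding one copy per
 * possible CPU, in for_each_possible_cpu() order, each slot padded to
 * round_up(value_size, 8) bytes.  This is the layout the syscall-facing
 * helpers bpf_percpu_hash_copy()/bpf_percpu_hash_update() below rely on.
 */
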
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 struct htab_elem *old_elem)
{
	u32 size = htab->map.value_size;
	bool prealloc = htab_is_prealloc(htab);
	struct htab_elem *l_new, **pl_new;
	void __percpu *pptr;

	if (prealloc) {
		if (old_elem) {
			/* if we're updating the existing element,
			 * use per-cpu extra elems to avoid freelist_pop/push
			 */
			pl_new = this_cpu_ptr(htab->extra_elems);
			l_new = *pl_new;
			*pl_new = old_elem;
		} else {
			struct pcpu_freelist_node *l;

			l = pcpu_freelist_pop(&htab->freelist);
			if (!l)
				return ERR_PTR(-E2BIG);
			l_new = container_of(l, struct htab_elem, fnode);
		}
	} else {
		if (atomic_inc_return(&htab->count) > htab->map.max_entries)
			if (!old_elem) {
				/* when map is full and update() is replacing
				 * old element, it's ok to allocate, since
				 * old element will be freed immediately.
				 * Otherwise return an error
				 */
				atomic_dec(&htab->count);
				return ERR_PTR(-E2BIG);
			}
		l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
				     htab->map.numa_node);
		if (!l_new)
			return ERR_PTR(-ENOMEM);
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		/* round up value_size to 8 bytes */
		size = round_up(size, 8);

		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = __alloc_percpu_gfp(size, 8,
						  GFP_ATOMIC | __GFP_NOWARN);
			if (!pptr) {
				kfree(l_new);
				return ERR_PTR(-ENOMEM);
			}
		}

		pcpu_copy_value(htab, pptr, value, onallcpus);

		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else {
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	}

	l_new->hash = hash;
	return l_new;
}

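/* Update flags, validated by the callers and checked in check_flags():
 * BPF_ANY (0) creates or replaces the element, BPF_NOEXIST (1) only
 * creates it, BPF_EXIST (2) only replaces an existing one.  Values
 * greater than BPF_EXIST are rejected as unknown.
 */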
static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && map_flags == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && map_flags == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
				l_old);
	if (IS_ERR(l_new)) {
		/* all pre-allocated elements are in use or memory exhausted */
		ret = PTR_ERR(l_new);
		goto err;
	}

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_nulls_del_rcu(&l_old->hash_node);
		if (!htab_is_prealloc(htab))
			free_htab_elem(htab, l_old);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

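/* From a BPF program this path is reached through the map-update helper;
 * sketch, with 'my_map' being a hypothetical BPF_MAP_TYPE_HASH map:
 *
 *	__u32 key = get_some_key();
 *	__u64 init_val = 1;
 *
 *	bpf_map_update_elem(&my_map, &key, &init_val, BPF_NOEXIST);
 */
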
static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new, *l_old = NULL;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because getting free nodes from LRU may need
	 * to remove older elements from htab and this removal
	 * operation will need a bucket lock.
	 */
	l_new = prealloc_lru_pop(htab, key, hash);
	if (!l_new)
		return -ENOMEM;
	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		bpf_lru_node_set_ref(&l_new->lru_node);
		hlist_nulls_del_rcu(&l_old->hash_node);
	}
	ret = 0;

err:
	raw_spin_unlock_irqrestore(&b->lock, flags);

	if (ret)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	else if (l_old)
		bpf_lru_push_free(&htab->lru, &l_old->lru_node);

	return ret;
}

static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags,
					 bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		l_new = alloc_htab_elem(htab, key, value, key_size,
					hash, true, onallcpus, NULL);
		if (IS_ERR(l_new)) {
			ret = PTR_ERR(l_new);
			goto err;
		}
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					     void *value, u64 map_flags,
					     bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because LRU's elem alloc may need
	 * to remove older elem from htab and this removal
	 * operation will need a bucket lock.
	 */
	if (map_flags != BPF_EXIST) {
		l_new = prealloc_lru_pop(htab, key, hash);
		if (!l_new)
			return -ENOMEM;
	}

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		bpf_lru_node_set_ref(&l_old->lru_node);

		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
				value, onallcpus);
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
		l_new = NULL;
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l_new)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	return ret;
}

static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}

static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 map_flags)
{
	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
						 false);
}

/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		free_htab_elem(htab, l);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l)
		bpf_lru_push_free(&htab->lru, &l->lru_node);
	return ret;
}

static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			hlist_nulls_del_rcu(&l->hash_node);
			htab_elem_free(htab, l);
		}
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * were disconnected from events. Wait for outstanding critical
	 * sections in these programs to complete
	 */
	synchronize_rcu();

	/* some of free_htab_elem() callbacks for elements of this map may
	 * not have executed. Wait for them.
	 */
	rcu_barrier();
	if (!htab_is_prealloc(htab))
		delete_all_elements(htab);
	else
		prealloc_destroy(htab);

	free_percpu(htab->extra_elems);
	bpf_map_area_free(htab->buckets);
	kfree(htab);
}

const struct bpf_map_ops htab_map_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_gen_lookup = htab_map_gen_lookup,
};

const struct bpf_map_ops htab_lru_map_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
};

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	}

	return NULL;
}

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	if (htab_is_lru(htab))
		bpf_lru_node_set_ref(&l->lru_node);
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

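/* Both bpf_percpu_hash_copy() above and bpf_percpu_hash_update() below
 * exchange all per-CPU copies with user space in one call, so the user
 * buffer must hold num_possible_cpus() * round_up(value_size, 8) bytes
 * (see the pcpu_copy_value() layout note earlier).
 */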
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int ret;

	rcu_read_lock();
	if (htab_is_lru(htab))
		ret = __htab_lru_percpu_map_update_elem(map, key, value,
							map_flags, true);
	else
		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
						    true);
	rcu_read_unlock();

	return ret;
}

const struct bpf_map_ops htab_percpu_map_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

const struct bpf_map_ops htab_lru_percpu_map_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
	.map_update_elem = htab_lru_percpu_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
};

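/* fd-based hash maps, the building block for hash-of-maps: user space
 * reads and writes a u32 fd as the value, while internally the map stores
 * the pointer that the fd resolves to.
 */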
static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map;

	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);

	/* pointer is stored internally */
	attr->value_size = sizeof(void *);
	map = htab_map_alloc(attr);
	attr->value_size = sizeof(u32);

	return map;
}

static void fd_htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_node *n;
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			void *ptr = fd_htab_map_get_ptr(map, l);

			map->ops->map_fd_put_ptr(ptr);
		}
	}

	htab_map_free(map);
}

/* only called from syscall */
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	ptr = htab_map_lookup_elem(map, key);
	if (ptr)
		*value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags)
{
	void *ptr;
	int ret;
	u32 ufd = *(u32 *)value;

	ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ret = htab_map_update_elem(map, key, &ptr, map_flags);
	if (ret)
		map->ops->map_fd_put_ptr(ptr);

	return ret;
}

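/* User-space sketch for a hash-of-maps update (hypothetical fds): the
 * value passed through the syscall is the inner map's fd, which
 * bpf_fd_htab_map_update_elem() resolves and stores as a pointer.
 *
 *	__u32 key = 0;
 *	__u32 inner_fd = ...;	// fd of an already created map
 *
 *	bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_ANY);
 */
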
static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = fd_htab_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = htab_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static void htab_of_map_free(struct bpf_map *map)
{
	bpf_map_meta_free(map->inner_map_meta);
	fd_htab_map_free(map);
}

const struct bpf_map_ops htab_of_maps_map_ops = {
	.map_alloc = htab_of_map_alloc,
	.map_free = htab_of_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_of_map_lookup_elem,
	.map_delete_elem = htab_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
};