blob: 70a534549cd3e0ce80e342192e17698ab8ac9c9b [file] [log] [blame]
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001/*
2 * Linux Socket Filter - Kernel level socket filtering
3 *
4 * Based on the design of the Berkeley Packet Filter. The new
5 * internal format has been designed by PLUMgrid:
6 *
7 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
8 *
9 * Authors:
10 *
11 * Jay Schulist <jschlst@samba.org>
12 * Alexei Starovoitov <ast@plumgrid.com>
13 * Daniel Borkmann <dborkman@redhat.com>
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 * Andi Kleen - Fix a few bad bugs and races.
Alexei Starovoitov4df95ff2014-07-30 20:34:14 -070021 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -070022 */
Daniel Borkmann738cbe72014-09-08 08:04:47 +020023
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -070024#include <linux/filter.h>
25#include <linux/skbuff.h>
Daniel Borkmann60a3b222014-09-02 22:53:44 +020026#include <linux/vmalloc.h>
Daniel Borkmann738cbe72014-09-08 08:04:47 +020027#include <linux/random.h>
28#include <linux/moduleloader.h>
Alexei Starovoitov09756af2014-09-26 00:17:00 -070029#include <linux/bpf.h>
Josh Poimboeuf39853cc2016-02-28 22:22:37 -060030#include <linux/frame.h>
Daniel Borkmann74451e662017-02-16 22:24:50 +010031#include <linux/rbtree_latch.h>
32#include <linux/kallsyms.h>
33#include <linux/rcupdate.h>
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -070034
Daniel Borkmann3324b582015-05-29 23:23:07 +020035#include <asm/unaligned.h>
36
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -070037/* Registers */
38#define BPF_R0 regs[BPF_REG_0]
39#define BPF_R1 regs[BPF_REG_1]
40#define BPF_R2 regs[BPF_REG_2]
41#define BPF_R3 regs[BPF_REG_3]
42#define BPF_R4 regs[BPF_REG_4]
43#define BPF_R5 regs[BPF_REG_5]
44#define BPF_R6 regs[BPF_REG_6]
45#define BPF_R7 regs[BPF_REG_7]
46#define BPF_R8 regs[BPF_REG_8]
47#define BPF_R9 regs[BPF_REG_9]
48#define BPF_R10 regs[BPF_REG_10]
49
50/* Named registers */
51#define DST regs[insn->dst_reg]
52#define SRC regs[insn->src_reg]
53#define FP regs[BPF_REG_FP]
54#define ARG1 regs[BPF_REG_ARG1]
55#define CTX regs[BPF_REG_CTX]
56#define IMM insn->imm
57
58/* No hurry in this branch
59 *
60 * Exported for the bpf jit load helper.
61 */
62void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
63{
64 u8 *ptr = NULL;
65
66 if (k >= SKF_NET_OFF)
67 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
68 else if (k >= SKF_LL_OFF)
69 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
Daniel Borkmann3324b582015-05-29 23:23:07 +020070
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -070071 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
72 return ptr;
73
74 return NULL;
75}
76
Daniel Borkmann60a3b222014-09-02 22:53:44 +020077struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
78{
Michal Hocko19809c22017-05-08 15:57:44 -070079 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
Alexei Starovoitov09756af2014-09-26 00:17:00 -070080 struct bpf_prog_aux *aux;
Daniel Borkmann60a3b222014-09-02 22:53:44 +020081 struct bpf_prog *fp;
82
83 size = round_up(size, PAGE_SIZE);
84 fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
85 if (fp == NULL)
86 return NULL;
87
Alexei Starovoitov09756af2014-09-26 00:17:00 -070088 aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
89 if (aux == NULL) {
Daniel Borkmann60a3b222014-09-02 22:53:44 +020090 vfree(fp);
91 return NULL;
92 }
93
94 fp->pages = size / PAGE_SIZE;
Alexei Starovoitov09756af2014-09-26 00:17:00 -070095 fp->aux = aux;
Daniel Borkmanne9d8afa2015-10-29 14:58:08 +010096 fp->aux->prog = fp;
Alexei Starovoitov60b58afc2017-12-14 17:55:14 -080097 fp->jit_requested = ebpf_jit_enabled();
Daniel Borkmann60a3b222014-09-02 22:53:44 +020098
Daniel Borkmann74451e662017-02-16 22:24:50 +010099 INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
100
Daniel Borkmann60a3b222014-09-02 22:53:44 +0200101 return fp;
102}
103EXPORT_SYMBOL_GPL(bpf_prog_alloc);
104
105struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
106 gfp_t gfp_extra_flags)
107{
Michal Hocko19809c22017-05-08 15:57:44 -0700108 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
Daniel Borkmann60a3b222014-09-02 22:53:44 +0200109 struct bpf_prog *fp;
Daniel Borkmann5ccb0712016-12-18 01:52:58 +0100110 u32 pages, delta;
111 int ret;
Daniel Borkmann60a3b222014-09-02 22:53:44 +0200112
113 BUG_ON(fp_old == NULL);
114
115 size = round_up(size, PAGE_SIZE);
Daniel Borkmann5ccb0712016-12-18 01:52:58 +0100116 pages = size / PAGE_SIZE;
117 if (pages <= fp_old->pages)
Daniel Borkmann60a3b222014-09-02 22:53:44 +0200118 return fp_old;
119
Daniel Borkmann5ccb0712016-12-18 01:52:58 +0100120 delta = pages - fp_old->pages;
121 ret = __bpf_prog_charge(fp_old->aux->user, delta);
122 if (ret)
123 return NULL;
124
Daniel Borkmann60a3b222014-09-02 22:53:44 +0200125 fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
Daniel Borkmann5ccb0712016-12-18 01:52:58 +0100126 if (fp == NULL) {
127 __bpf_prog_uncharge(fp_old->aux->user, delta);
128 } else {
Daniel Borkmann60a3b222014-09-02 22:53:44 +0200129 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
Daniel Borkmann5ccb0712016-12-18 01:52:58 +0100130 fp->pages = pages;
Daniel Borkmanne9d8afa2015-10-29 14:58:08 +0100131 fp->aux->prog = fp;
Daniel Borkmann60a3b222014-09-02 22:53:44 +0200132
Alexei Starovoitov09756af2014-09-26 00:17:00 -0700133 /* We keep fp->aux from fp_old around in the new
Daniel Borkmann60a3b222014-09-02 22:53:44 +0200134 * reallocated structure.
135 */
Alexei Starovoitov09756af2014-09-26 00:17:00 -0700136 fp_old->aux = NULL;
Daniel Borkmann60a3b222014-09-02 22:53:44 +0200137 __bpf_prog_free(fp_old);
138 }
139
140 return fp;
141}
Daniel Borkmann60a3b222014-09-02 22:53:44 +0200142
143void __bpf_prog_free(struct bpf_prog *fp)
144{
Alexei Starovoitov09756af2014-09-26 00:17:00 -0700145 kfree(fp->aux);
Daniel Borkmann60a3b222014-09-02 22:53:44 +0200146 vfree(fp);
147}
Daniel Borkmann60a3b222014-09-02 22:53:44 +0200148
Daniel Borkmannf1f77142017-01-13 23:38:15 +0100149int bpf_prog_calc_tag(struct bpf_prog *fp)
Daniel Borkmann7bd509e2016-12-04 23:19:41 +0100150{
151 const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
Daniel Borkmannf1f77142017-01-13 23:38:15 +0100152 u32 raw_size = bpf_prog_tag_scratch_size(fp);
153 u32 digest[SHA_DIGEST_WORDS];
Daniel Borkmannaafe6ae2016-12-18 01:52:57 +0100154 u32 ws[SHA_WORKSPACE_WORDS];
Daniel Borkmann7bd509e2016-12-04 23:19:41 +0100155 u32 i, bsize, psize, blocks;
Daniel Borkmannaafe6ae2016-12-18 01:52:57 +0100156 struct bpf_insn *dst;
Daniel Borkmann7bd509e2016-12-04 23:19:41 +0100157 bool was_ld_map;
Daniel Borkmannaafe6ae2016-12-18 01:52:57 +0100158 u8 *raw, *todo;
Daniel Borkmann7bd509e2016-12-04 23:19:41 +0100159 __be32 *result;
160 __be64 *bits;
161
Daniel Borkmannaafe6ae2016-12-18 01:52:57 +0100162 raw = vmalloc(raw_size);
163 if (!raw)
164 return -ENOMEM;
165
Daniel Borkmannf1f77142017-01-13 23:38:15 +0100166 sha_init(digest);
Daniel Borkmann7bd509e2016-12-04 23:19:41 +0100167 memset(ws, 0, sizeof(ws));
168
169 /* We need to take out the map fd for the digest calculation
170 * since they are unstable from user space side.
171 */
Daniel Borkmannaafe6ae2016-12-18 01:52:57 +0100172 dst = (void *)raw;
Daniel Borkmann7bd509e2016-12-04 23:19:41 +0100173 for (i = 0, was_ld_map = false; i < fp->len; i++) {
174 dst[i] = fp->insnsi[i];
175 if (!was_ld_map &&
176 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
177 dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
178 was_ld_map = true;
179 dst[i].imm = 0;
180 } else if (was_ld_map &&
181 dst[i].code == 0 &&
182 dst[i].dst_reg == 0 &&
183 dst[i].src_reg == 0 &&
184 dst[i].off == 0) {
185 was_ld_map = false;
186 dst[i].imm = 0;
187 } else {
188 was_ld_map = false;
189 }
190 }
191
Daniel Borkmannaafe6ae2016-12-18 01:52:57 +0100192 psize = bpf_prog_insn_size(fp);
193 memset(&raw[psize], 0, raw_size - psize);
Daniel Borkmann7bd509e2016-12-04 23:19:41 +0100194 raw[psize++] = 0x80;
195
196 bsize = round_up(psize, SHA_MESSAGE_BYTES);
197 blocks = bsize / SHA_MESSAGE_BYTES;
Daniel Borkmannaafe6ae2016-12-18 01:52:57 +0100198 todo = raw;
Daniel Borkmann7bd509e2016-12-04 23:19:41 +0100199 if (bsize - psize >= sizeof(__be64)) {
200 bits = (__be64 *)(todo + bsize - sizeof(__be64));
201 } else {
202 bits = (__be64 *)(todo + bsize + bits_offset);
203 blocks++;
204 }
205 *bits = cpu_to_be64((psize - 1) << 3);
206
207 while (blocks--) {
Daniel Borkmannf1f77142017-01-13 23:38:15 +0100208 sha_transform(digest, todo, ws);
Daniel Borkmann7bd509e2016-12-04 23:19:41 +0100209 todo += SHA_MESSAGE_BYTES;
210 }
211
Daniel Borkmannf1f77142017-01-13 23:38:15 +0100212 result = (__force __be32 *)digest;
Daniel Borkmann7bd509e2016-12-04 23:19:41 +0100213 for (i = 0; i < SHA_DIGEST_WORDS; i++)
Daniel Borkmannf1f77142017-01-13 23:38:15 +0100214 result[i] = cpu_to_be32(digest[i]);
215 memcpy(fp->tag, result, sizeof(fp->tag));
Daniel Borkmannaafe6ae2016-12-18 01:52:57 +0100216
217 vfree(raw);
218 return 0;
Daniel Borkmann7bd509e2016-12-04 23:19:41 +0100219}
220
Daniel Borkmannc237ee52016-05-13 19:08:30 +0200221static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
222{
223 struct bpf_insn *insn = prog->insnsi;
224 u32 i, insn_cnt = prog->len;
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -0800225 bool pseudo_call;
226 u8 code;
227 int off;
Daniel Borkmannc237ee52016-05-13 19:08:30 +0200228
229 for (i = 0; i < insn_cnt; i++, insn++) {
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -0800230 code = insn->code;
231 if (BPF_CLASS(code) != BPF_JMP)
Daniel Borkmannc237ee52016-05-13 19:08:30 +0200232 continue;
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -0800233 if (BPF_OP(code) == BPF_EXIT)
234 continue;
235 if (BPF_OP(code) == BPF_CALL) {
236 if (insn->src_reg == BPF_PSEUDO_CALL)
237 pseudo_call = true;
238 else
239 continue;
240 } else {
241 pseudo_call = false;
242 }
243 off = pseudo_call ? insn->imm : insn->off;
Daniel Borkmannc237ee52016-05-13 19:08:30 +0200244
245 /* Adjust offset of jmps if we cross boundaries. */
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -0800246 if (i < pos && i + off + 1 > pos)
247 off += delta;
248 else if (i > pos + delta && i + off + 1 <= pos + delta)
249 off -= delta;
250
251 if (pseudo_call)
252 insn->imm = off;
253 else
254 insn->off = off;
Daniel Borkmannc237ee52016-05-13 19:08:30 +0200255 }
256}
257
258struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
259 const struct bpf_insn *patch, u32 len)
260{
261 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
262 struct bpf_prog *prog_adj;
263
264 /* Since our patchlet doesn't expand the image, we're done. */
265 if (insn_delta == 0) {
266 memcpy(prog->insnsi + off, patch, sizeof(*patch));
267 return prog;
268 }
269
270 insn_adj_cnt = prog->len + insn_delta;
271
272 /* Several new instructions need to be inserted. Make room
273 * for them. Likely, there's no need for a new allocation as
274 * last page could have large enough tailroom.
275 */
276 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
277 GFP_USER);
278 if (!prog_adj)
279 return NULL;
280
281 prog_adj->len = insn_adj_cnt;
282
283 /* Patching happens in 3 steps:
284 *
285 * 1) Move over tail of insnsi from next instruction onwards,
286 * so we can patch the single target insn with one or more
287 * new ones (patching is always from 1 to n insns, n > 0).
288 * 2) Inject new instructions at the target location.
289 * 3) Adjust branch offsets if necessary.
290 */
291 insn_rest = insn_adj_cnt - off - len;
292
293 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
294 sizeof(*patch) * insn_rest);
295 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
296
297 bpf_adj_branches(prog_adj, off, insn_delta);
298
299 return prog_adj;
300}
301
Daniel Borkmannb954d832014-09-10 15:01:02 +0200302#ifdef CONFIG_BPF_JIT
Daniel Borkmann74451e662017-02-16 22:24:50 +0100303static __always_inline void
304bpf_get_prog_addr_region(const struct bpf_prog *prog,
305 unsigned long *symbol_start,
306 unsigned long *symbol_end)
307{
308 const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
309 unsigned long addr = (unsigned long)hdr;
310
311 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
312
313 *symbol_start = addr;
314 *symbol_end = addr + hdr->pages * PAGE_SIZE;
315}
316
317static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
318{
Martin KaFai Lau368211f2017-10-05 21:52:13 -0700319 const char *end = sym + KSYM_NAME_LEN;
320
Daniel Borkmann74451e662017-02-16 22:24:50 +0100321 BUILD_BUG_ON(sizeof("bpf_prog_") +
Martin KaFai Lau368211f2017-10-05 21:52:13 -0700322 sizeof(prog->tag) * 2 +
323 /* name has been null terminated.
324 * We should need +1 for the '_' preceding
325 * the name. However, the null character
326 * is double counted between the name and the
327 * sizeof("bpf_prog_") above, so we omit
328 * the +1 here.
329 */
330 sizeof(prog->aux->name) > KSYM_NAME_LEN);
Daniel Borkmann74451e662017-02-16 22:24:50 +0100331
332 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
333 sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
Martin KaFai Lau368211f2017-10-05 21:52:13 -0700334 if (prog->aux->name[0])
335 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
336 else
337 *sym = 0;
Daniel Borkmann74451e662017-02-16 22:24:50 +0100338}
339
340static __always_inline unsigned long
341bpf_get_prog_addr_start(struct latch_tree_node *n)
342{
343 unsigned long symbol_start, symbol_end;
344 const struct bpf_prog_aux *aux;
345
346 aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
347 bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
348
349 return symbol_start;
350}
351
352static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
353 struct latch_tree_node *b)
354{
355 return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
356}
357
358static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
359{
360 unsigned long val = (unsigned long)key;
361 unsigned long symbol_start, symbol_end;
362 const struct bpf_prog_aux *aux;
363
364 aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
365 bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
366
367 if (val < symbol_start)
368 return -1;
369 if (val >= symbol_end)
370 return 1;
371
372 return 0;
373}
374
375static const struct latch_tree_ops bpf_tree_ops = {
376 .less = bpf_tree_less,
377 .comp = bpf_tree_comp,
378};
379
380static DEFINE_SPINLOCK(bpf_lock);
381static LIST_HEAD(bpf_kallsyms);
382static struct latch_tree_root bpf_tree __cacheline_aligned;
383
384int bpf_jit_kallsyms __read_mostly;
385
386static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
387{
388 WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
389 list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
390 latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
391}
392
393static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
394{
395 if (list_empty(&aux->ksym_lnode))
396 return;
397
398 latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
399 list_del_rcu(&aux->ksym_lnode);
400}
401
402static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
403{
404 return fp->jited && !bpf_prog_was_classic(fp);
405}
406
407static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
408{
409 return list_empty(&fp->aux->ksym_lnode) ||
410 fp->aux->ksym_lnode.prev == LIST_POISON2;
411}
412
413void bpf_prog_kallsyms_add(struct bpf_prog *fp)
414{
Daniel Borkmann74451e662017-02-16 22:24:50 +0100415 if (!bpf_prog_kallsyms_candidate(fp) ||
416 !capable(CAP_SYS_ADMIN))
417 return;
418
Hannes Frederic Sowad24f7c72017-04-27 01:39:33 +0200419 spin_lock_bh(&bpf_lock);
Daniel Borkmann74451e662017-02-16 22:24:50 +0100420 bpf_prog_ksym_node_add(fp->aux);
Hannes Frederic Sowad24f7c72017-04-27 01:39:33 +0200421 spin_unlock_bh(&bpf_lock);
Daniel Borkmann74451e662017-02-16 22:24:50 +0100422}
423
424void bpf_prog_kallsyms_del(struct bpf_prog *fp)
425{
Daniel Borkmann74451e662017-02-16 22:24:50 +0100426 if (!bpf_prog_kallsyms_candidate(fp))
427 return;
428
Hannes Frederic Sowad24f7c72017-04-27 01:39:33 +0200429 spin_lock_bh(&bpf_lock);
Daniel Borkmann74451e662017-02-16 22:24:50 +0100430 bpf_prog_ksym_node_del(fp->aux);
Hannes Frederic Sowad24f7c72017-04-27 01:39:33 +0200431 spin_unlock_bh(&bpf_lock);
Daniel Borkmann74451e662017-02-16 22:24:50 +0100432}
433
434static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
435{
436 struct latch_tree_node *n;
437
438 if (!bpf_jit_kallsyms_enabled())
439 return NULL;
440
441 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
442 return n ?
443 container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
444 NULL;
445}
446
447const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
448 unsigned long *off, char *sym)
449{
450 unsigned long symbol_start, symbol_end;
451 struct bpf_prog *prog;
452 char *ret = NULL;
453
454 rcu_read_lock();
455 prog = bpf_prog_kallsyms_find(addr);
456 if (prog) {
457 bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
458 bpf_get_prog_name(prog, sym);
459
460 ret = sym;
461 if (size)
462 *size = symbol_end - symbol_start;
463 if (off)
464 *off = addr - symbol_start;
465 }
466 rcu_read_unlock();
467
468 return ret;
469}
470
471bool is_bpf_text_address(unsigned long addr)
472{
473 bool ret;
474
475 rcu_read_lock();
476 ret = bpf_prog_kallsyms_find(addr) != NULL;
477 rcu_read_unlock();
478
479 return ret;
480}
481
482int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
483 char *sym)
484{
485 unsigned long symbol_start, symbol_end;
486 struct bpf_prog_aux *aux;
487 unsigned int it = 0;
488 int ret = -ERANGE;
489
490 if (!bpf_jit_kallsyms_enabled())
491 return ret;
492
493 rcu_read_lock();
494 list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
495 if (it++ != symnum)
496 continue;
497
498 bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
499 bpf_get_prog_name(aux->prog, sym);
500
501 *value = symbol_start;
502 *type = BPF_SYM_ELF_TYPE;
503
504 ret = 0;
505 break;
506 }
507 rcu_read_unlock();
508
509 return ret;
510}
511
Daniel Borkmann738cbe72014-09-08 08:04:47 +0200512struct bpf_binary_header *
513bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
514 unsigned int alignment,
515 bpf_jit_fill_hole_t bpf_fill_ill_insns)
516{
517 struct bpf_binary_header *hdr;
518 unsigned int size, hole, start;
519
520 /* Most of BPF filters are really small, but if some of them
521 * fill a page, allow at least 128 extra bytes to insert a
522 * random section of illegal instructions.
523 */
524 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
525 hdr = module_alloc(size);
526 if (hdr == NULL)
527 return NULL;
528
529 /* Fill space with illegal/arch-dep instructions. */
530 bpf_fill_ill_insns(hdr, size);
531
532 hdr->pages = size / PAGE_SIZE;
533 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
534 PAGE_SIZE - sizeof(*hdr));
Daniel Borkmannb7552e1b2016-05-18 14:14:28 +0200535 start = (get_random_int() % hole) & ~(alignment - 1);
Daniel Borkmann738cbe72014-09-08 08:04:47 +0200536
537 /* Leave a random number of instructions before BPF code. */
538 *image_ptr = &hdr->image[start];
539
540 return hdr;
541}
542
543void bpf_jit_binary_free(struct bpf_binary_header *hdr)
544{
Rusty Russellbe1f2212015-01-20 09:07:05 +1030545 module_memfree(hdr);
Daniel Borkmann738cbe72014-09-08 08:04:47 +0200546}
Daniel Borkmann4f3446b2016-05-13 19:08:32 +0200547
Daniel Borkmann74451e662017-02-16 22:24:50 +0100548/* This symbol is only overridden by archs that have different
549 * requirements than the usual eBPF JITs, f.e. when they only
550 * implement cBPF JIT, do not set images read-only, etc.
551 */
552void __weak bpf_jit_free(struct bpf_prog *fp)
553{
554 if (fp->jited) {
555 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
556
557 bpf_jit_binary_unlock_ro(hdr);
558 bpf_jit_binary_free(hdr);
559
560 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
561 }
562
563 bpf_prog_unlock_free(fp);
564}
565
Daniel Borkmann4f3446b2016-05-13 19:08:32 +0200566int bpf_jit_harden __read_mostly;
567
568static int bpf_jit_blind_insn(const struct bpf_insn *from,
569 const struct bpf_insn *aux,
570 struct bpf_insn *to_buff)
571{
572 struct bpf_insn *to = to_buff;
Daniel Borkmannb7552e1b2016-05-18 14:14:28 +0200573 u32 imm_rnd = get_random_int();
Daniel Borkmann4f3446b2016-05-13 19:08:32 +0200574 s16 off;
575
576 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
577 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
578
579 if (from->imm == 0 &&
580 (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
581 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
582 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
583 goto out;
584 }
585
586 switch (from->code) {
587 case BPF_ALU | BPF_ADD | BPF_K:
588 case BPF_ALU | BPF_SUB | BPF_K:
589 case BPF_ALU | BPF_AND | BPF_K:
590 case BPF_ALU | BPF_OR | BPF_K:
591 case BPF_ALU | BPF_XOR | BPF_K:
592 case BPF_ALU | BPF_MUL | BPF_K:
593 case BPF_ALU | BPF_MOV | BPF_K:
594 case BPF_ALU | BPF_DIV | BPF_K:
595 case BPF_ALU | BPF_MOD | BPF_K:
596 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
597 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
598 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
599 break;
600
601 case BPF_ALU64 | BPF_ADD | BPF_K:
602 case BPF_ALU64 | BPF_SUB | BPF_K:
603 case BPF_ALU64 | BPF_AND | BPF_K:
604 case BPF_ALU64 | BPF_OR | BPF_K:
605 case BPF_ALU64 | BPF_XOR | BPF_K:
606 case BPF_ALU64 | BPF_MUL | BPF_K:
607 case BPF_ALU64 | BPF_MOV | BPF_K:
608 case BPF_ALU64 | BPF_DIV | BPF_K:
609 case BPF_ALU64 | BPF_MOD | BPF_K:
610 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
611 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
612 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
613 break;
614
615 case BPF_JMP | BPF_JEQ | BPF_K:
616 case BPF_JMP | BPF_JNE | BPF_K:
617 case BPF_JMP | BPF_JGT | BPF_K:
Daniel Borkmann92b31a92017-08-10 01:39:55 +0200618 case BPF_JMP | BPF_JLT | BPF_K:
Daniel Borkmann4f3446b2016-05-13 19:08:32 +0200619 case BPF_JMP | BPF_JGE | BPF_K:
Daniel Borkmann92b31a92017-08-10 01:39:55 +0200620 case BPF_JMP | BPF_JLE | BPF_K:
Daniel Borkmann4f3446b2016-05-13 19:08:32 +0200621 case BPF_JMP | BPF_JSGT | BPF_K:
Daniel Borkmann92b31a92017-08-10 01:39:55 +0200622 case BPF_JMP | BPF_JSLT | BPF_K:
Daniel Borkmann4f3446b2016-05-13 19:08:32 +0200623 case BPF_JMP | BPF_JSGE | BPF_K:
Daniel Borkmann92b31a92017-08-10 01:39:55 +0200624 case BPF_JMP | BPF_JSLE | BPF_K:
Daniel Borkmann4f3446b2016-05-13 19:08:32 +0200625 case BPF_JMP | BPF_JSET | BPF_K:
626 /* Accommodate for extra offset in case of a backjump. */
627 off = from->off;
628 if (off < 0)
629 off -= 2;
630 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
631 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
632 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
633 break;
634
635 case BPF_LD | BPF_ABS | BPF_W:
636 case BPF_LD | BPF_ABS | BPF_H:
637 case BPF_LD | BPF_ABS | BPF_B:
638 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
639 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
640 *to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
641 break;
642
643 case BPF_LD | BPF_IND | BPF_W:
644 case BPF_LD | BPF_IND | BPF_H:
645 case BPF_LD | BPF_IND | BPF_B:
646 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
647 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
648 *to++ = BPF_ALU32_REG(BPF_ADD, BPF_REG_AX, from->src_reg);
649 *to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
650 break;
651
652 case BPF_LD | BPF_IMM | BPF_DW:
653 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
654 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
655 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
656 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
657 break;
658 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
659 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
660 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
661 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
662 break;
663
664 case BPF_ST | BPF_MEM | BPF_DW:
665 case BPF_ST | BPF_MEM | BPF_W:
666 case BPF_ST | BPF_MEM | BPF_H:
667 case BPF_ST | BPF_MEM | BPF_B:
668 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
669 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
670 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
671 break;
672 }
673out:
674 return to - to_buff;
675}
676
677static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
678 gfp_t gfp_extra_flags)
679{
Michal Hocko19809c22017-05-08 15:57:44 -0700680 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
Daniel Borkmann4f3446b2016-05-13 19:08:32 +0200681 struct bpf_prog *fp;
682
683 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
684 if (fp != NULL) {
Daniel Borkmann4f3446b2016-05-13 19:08:32 +0200685 /* aux->prog still points to the fp_other one, so
686 * when promoting the clone to the real program,
687 * this still needs to be adapted.
688 */
689 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
690 }
691
692 return fp;
693}
694
695static void bpf_prog_clone_free(struct bpf_prog *fp)
696{
697 /* aux was stolen by the other clone, so we cannot free
698 * it from this path! It will be freed eventually by the
699 * other program on release.
700 *
701 * At this point, we don't need a deferred release since
702 * clone is guaranteed to not be locked.
703 */
704 fp->aux = NULL;
705 __bpf_prog_free(fp);
706}
707
708void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
709{
710 /* We have to repoint aux->prog to self, as we don't
711 * know whether fp here is the clone or the original.
712 */
713 fp->aux->prog = fp;
714 bpf_prog_clone_free(fp_other);
715}
716
717struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
718{
719 struct bpf_insn insn_buff[16], aux[2];
720 struct bpf_prog *clone, *tmp;
721 int insn_delta, insn_cnt;
722 struct bpf_insn *insn;
723 int i, rewritten;
724
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -0800725 if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
Daniel Borkmann4f3446b2016-05-13 19:08:32 +0200726 return prog;
727
728 clone = bpf_prog_clone_create(prog, GFP_USER);
729 if (!clone)
730 return ERR_PTR(-ENOMEM);
731
732 insn_cnt = clone->len;
733 insn = clone->insnsi;
734
735 for (i = 0; i < insn_cnt; i++, insn++) {
736 /* We temporarily need to hold the original ld64 insn
737 * so that we can still access the first part in the
738 * second blinding run.
739 */
740 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
741 insn[1].code == 0)
742 memcpy(aux, insn, sizeof(aux));
743
744 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
745 if (!rewritten)
746 continue;
747
748 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
749 if (!tmp) {
750 /* Patching may have repointed aux->prog during
751 * realloc from the original one, so we need to
752 * fix it up here on error.
753 */
754 bpf_jit_prog_release_other(prog, clone);
755 return ERR_PTR(-ENOMEM);
756 }
757
758 clone = tmp;
759 insn_delta = rewritten - 1;
760
761 /* Walk new program and skip insns we just inserted. */
762 insn = clone->insnsi + i + insn_delta;
763 insn_cnt += insn_delta;
764 i += insn_delta;
765 }
766
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -0800767 clone->blinded = 1;
Daniel Borkmann4f3446b2016-05-13 19:08:32 +0200768 return clone;
769}
Daniel Borkmannb954d832014-09-10 15:01:02 +0200770#endif /* CONFIG_BPF_JIT */
Daniel Borkmann738cbe72014-09-08 08:04:47 +0200771
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -0700772/* Base function for offset calculation. Needs to go into .text section,
773 * therefore keeping it non-static as well; will also be used by JITs
Daniel Borkmann7105e822017-12-20 13:42:57 +0100774 * anyway later on, so do not let the compiler omit it. This also needs
775 * to go into kallsyms for correlation from e.g. bpftool, so naming
776 * must not change.
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -0700777 */
778noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
779{
780 return 0;
781}
Alexei Starovoitov4d9c5c52015-07-20 20:34:19 -0700782EXPORT_SYMBOL_GPL(__bpf_call_base);
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -0700783
784/**
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -0700785 * __bpf_prog_run - run eBPF program on a given context
786 * @ctx: is the data we are operating on
787 * @insn: is the array of eBPF instructions
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -0700788 *
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -0700789 * Decode and execute eBPF instructions.
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -0700790 */
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -0800791static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -0700792{
Alexei Starovoitovf696b8f2017-05-30 13:31:28 -0700793 u64 tmp;
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -0700794 static const void *jumptable[256] = {
795 [0 ... 255] = &&default_label,
796 /* Now overwrite non-defaults ... */
797 /* 32 bit ALU operations */
798 [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
799 [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
800 [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
801 [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
802 [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
803 [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
804 [BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
805 [BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
806 [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
807 [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
808 [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
809 [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
810 [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
811 [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
812 [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
813 [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
814 [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
815 [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
816 [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
817 [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
818 [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
819 [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
820 [BPF_ALU | BPF_NEG] = &&ALU_NEG,
821 [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
822 [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
823 /* 64 bit ALU operations */
824 [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
825 [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
826 [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
827 [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
828 [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
829 [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
830 [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
831 [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
832 [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
833 [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
834 [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
835 [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
836 [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
837 [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
838 [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
839 [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
840 [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
841 [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
842 [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
843 [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
844 [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
845 [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
846 [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
847 [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
848 [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
849 /* Call instruction */
850 [BPF_JMP | BPF_CALL] = &&JMP_CALL,
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -0800851 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
Alexei Starovoitov71189fa2017-05-30 13:31:27 -0700852 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -0700853 /* Jumps */
854 [BPF_JMP | BPF_JA] = &&JMP_JA,
855 [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
856 [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
857 [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
858 [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
859 [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
860 [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
Daniel Borkmann92b31a92017-08-10 01:39:55 +0200861 [BPF_JMP | BPF_JLT | BPF_X] = &&JMP_JLT_X,
862 [BPF_JMP | BPF_JLT | BPF_K] = &&JMP_JLT_K,
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -0700863 [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
864 [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
Daniel Borkmann92b31a92017-08-10 01:39:55 +0200865 [BPF_JMP | BPF_JLE | BPF_X] = &&JMP_JLE_X,
866 [BPF_JMP | BPF_JLE | BPF_K] = &&JMP_JLE_K,
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -0700867 [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
868 [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
Daniel Borkmann92b31a92017-08-10 01:39:55 +0200869 [BPF_JMP | BPF_JSLT | BPF_X] = &&JMP_JSLT_X,
870 [BPF_JMP | BPF_JSLT | BPF_K] = &&JMP_JSLT_K,
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -0700871 [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
872 [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
Daniel Borkmann92b31a92017-08-10 01:39:55 +0200873 [BPF_JMP | BPF_JSLE | BPF_X] = &&JMP_JSLE_X,
874 [BPF_JMP | BPF_JSLE | BPF_K] = &&JMP_JSLE_K,
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -0700875 [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
876 [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
877 /* Program return */
878 [BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
879 /* Store instructions */
880 [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
881 [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
882 [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
883 [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
884 [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
885 [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
886 [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
887 [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
888 [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
889 [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
890 /* Load instructions */
891 [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
892 [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
893 [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
894 [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
895 [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
896 [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
897 [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
898 [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
899 [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
900 [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
Alexei Starovoitov02ab6952014-09-04 22:17:17 -0700901 [BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -0700902 };
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -0700903 u32 tail_call_cnt = 0;
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -0700904 void *ptr;
905 int off;
906
907#define CONT ({ insn++; goto select_insn; })
908#define CONT_JMP ({ insn++; goto select_insn; })
909
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -0700910select_insn:
911 goto *jumptable[insn->code];
912
913 /* ALU */
914#define ALU(OPCODE, OP) \
915 ALU64_##OPCODE##_X: \
916 DST = DST OP SRC; \
917 CONT; \
918 ALU_##OPCODE##_X: \
919 DST = (u32) DST OP (u32) SRC; \
920 CONT; \
921 ALU64_##OPCODE##_K: \
922 DST = DST OP IMM; \
923 CONT; \
924 ALU_##OPCODE##_K: \
925 DST = (u32) DST OP (u32) IMM; \
926 CONT;
927
928 ALU(ADD, +)
929 ALU(SUB, -)
930 ALU(AND, &)
931 ALU(OR, |)
932 ALU(LSH, <<)
933 ALU(RSH, >>)
934 ALU(XOR, ^)
935 ALU(MUL, *)
936#undef ALU
937 ALU_NEG:
938 DST = (u32) -DST;
939 CONT;
940 ALU64_NEG:
941 DST = -DST;
942 CONT;
943 ALU_MOV_X:
944 DST = (u32) SRC;
945 CONT;
946 ALU_MOV_K:
947 DST = (u32) IMM;
948 CONT;
949 ALU64_MOV_X:
950 DST = SRC;
951 CONT;
952 ALU64_MOV_K:
953 DST = IMM;
954 CONT;
Alexei Starovoitov02ab6952014-09-04 22:17:17 -0700955 LD_IMM_DW:
956 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
957 insn++;
958 CONT;
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -0700959 ALU64_ARSH_X:
960 (*(s64 *) &DST) >>= SRC;
961 CONT;
962 ALU64_ARSH_K:
963 (*(s64 *) &DST) >>= IMM;
964 CONT;
965 ALU64_MOD_X:
966 if (unlikely(SRC == 0))
967 return 0;
Alexei Starovoitov876a7ae2015-04-27 14:40:37 -0700968 div64_u64_rem(DST, SRC, &tmp);
969 DST = tmp;
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -0700970 CONT;
971 ALU_MOD_X:
972 if (unlikely(SRC == 0))
973 return 0;
974 tmp = (u32) DST;
975 DST = do_div(tmp, (u32) SRC);
976 CONT;
977 ALU64_MOD_K:
Alexei Starovoitov876a7ae2015-04-27 14:40:37 -0700978 div64_u64_rem(DST, IMM, &tmp);
979 DST = tmp;
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -0700980 CONT;
981 ALU_MOD_K:
982 tmp = (u32) DST;
983 DST = do_div(tmp, (u32) IMM);
984 CONT;
985 ALU64_DIV_X:
986 if (unlikely(SRC == 0))
987 return 0;
Alexei Starovoitov876a7ae2015-04-27 14:40:37 -0700988 DST = div64_u64(DST, SRC);
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -0700989 CONT;
990 ALU_DIV_X:
991 if (unlikely(SRC == 0))
992 return 0;
993 tmp = (u32) DST;
994 do_div(tmp, (u32) SRC);
995 DST = (u32) tmp;
996 CONT;
997 ALU64_DIV_K:
Alexei Starovoitov876a7ae2015-04-27 14:40:37 -0700998 DST = div64_u64(DST, IMM);
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -0700999 CONT;
1000 ALU_DIV_K:
1001 tmp = (u32) DST;
1002 do_div(tmp, (u32) IMM);
1003 DST = (u32) tmp;
1004 CONT;
1005 ALU_END_TO_BE:
1006 switch (IMM) {
1007 case 16:
1008 DST = (__force u16) cpu_to_be16(DST);
1009 break;
1010 case 32:
1011 DST = (__force u32) cpu_to_be32(DST);
1012 break;
1013 case 64:
1014 DST = (__force u64) cpu_to_be64(DST);
1015 break;
1016 }
1017 CONT;
1018 ALU_END_TO_LE:
1019 switch (IMM) {
1020 case 16:
1021 DST = (__force u16) cpu_to_le16(DST);
1022 break;
1023 case 32:
1024 DST = (__force u32) cpu_to_le32(DST);
1025 break;
1026 case 64:
1027 DST = (__force u64) cpu_to_le64(DST);
1028 break;
1029 }
1030 CONT;
1031
1032 /* CALL */
1033 JMP_CALL:
1034 /* Function call scratches BPF_R1-BPF_R5 registers,
1035 * preserves BPF_R6-BPF_R9, and stores return value
1036 * into BPF_R0.
1037 */
1038 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1039 BPF_R4, BPF_R5);
1040 CONT;
1041
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -08001042 JMP_CALL_ARGS:
1043 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1044 BPF_R3, BPF_R4,
1045 BPF_R5,
1046 insn + insn->off + 1);
1047 CONT;
1048
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001049 JMP_TAIL_CALL: {
1050 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1051 struct bpf_array *array = container_of(map, struct bpf_array, map);
1052 struct bpf_prog *prog;
Alexei Starovoitov90caccd2017-10-03 15:37:20 -07001053 u32 index = BPF_R3;
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001054
1055 if (unlikely(index >= array->map.max_entries))
1056 goto out;
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001057 if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1058 goto out;
1059
1060 tail_call_cnt++;
1061
Wang Nan2a36f0b2015-08-06 07:02:33 +00001062 prog = READ_ONCE(array->ptrs[index]);
Daniel Borkmann1ca1cc92016-06-28 12:18:23 +02001063 if (!prog)
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001064 goto out;
1065
Daniel Borkmannc4675f92015-07-13 20:49:32 +02001066 /* ARG1 at this point is guaranteed to point to CTX from
1067 * the verifier side due to the fact that the tail call is
1068 * handeled like a helper, that is, bpf_tail_call_proto,
1069 * where arg1_type is ARG_PTR_TO_CTX.
1070 */
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001071 insn = prog->insnsi;
1072 goto select_insn;
1073out:
1074 CONT;
1075 }
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001076 /* JMP */
1077 JMP_JA:
1078 insn += insn->off;
1079 CONT;
1080 JMP_JEQ_X:
1081 if (DST == SRC) {
1082 insn += insn->off;
1083 CONT_JMP;
1084 }
1085 CONT;
1086 JMP_JEQ_K:
1087 if (DST == IMM) {
1088 insn += insn->off;
1089 CONT_JMP;
1090 }
1091 CONT;
1092 JMP_JNE_X:
1093 if (DST != SRC) {
1094 insn += insn->off;
1095 CONT_JMP;
1096 }
1097 CONT;
1098 JMP_JNE_K:
1099 if (DST != IMM) {
1100 insn += insn->off;
1101 CONT_JMP;
1102 }
1103 CONT;
1104 JMP_JGT_X:
1105 if (DST > SRC) {
1106 insn += insn->off;
1107 CONT_JMP;
1108 }
1109 CONT;
1110 JMP_JGT_K:
1111 if (DST > IMM) {
1112 insn += insn->off;
1113 CONT_JMP;
1114 }
1115 CONT;
Daniel Borkmann92b31a92017-08-10 01:39:55 +02001116 JMP_JLT_X:
1117 if (DST < SRC) {
1118 insn += insn->off;
1119 CONT_JMP;
1120 }
1121 CONT;
1122 JMP_JLT_K:
1123 if (DST < IMM) {
1124 insn += insn->off;
1125 CONT_JMP;
1126 }
1127 CONT;
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001128 JMP_JGE_X:
1129 if (DST >= SRC) {
1130 insn += insn->off;
1131 CONT_JMP;
1132 }
1133 CONT;
1134 JMP_JGE_K:
1135 if (DST >= IMM) {
1136 insn += insn->off;
1137 CONT_JMP;
1138 }
1139 CONT;
Daniel Borkmann92b31a92017-08-10 01:39:55 +02001140 JMP_JLE_X:
1141 if (DST <= SRC) {
1142 insn += insn->off;
1143 CONT_JMP;
1144 }
1145 CONT;
1146 JMP_JLE_K:
1147 if (DST <= IMM) {
1148 insn += insn->off;
1149 CONT_JMP;
1150 }
1151 CONT;
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001152 JMP_JSGT_X:
1153 if (((s64) DST) > ((s64) SRC)) {
1154 insn += insn->off;
1155 CONT_JMP;
1156 }
1157 CONT;
1158 JMP_JSGT_K:
1159 if (((s64) DST) > ((s64) IMM)) {
1160 insn += insn->off;
1161 CONT_JMP;
1162 }
1163 CONT;
Daniel Borkmann92b31a92017-08-10 01:39:55 +02001164 JMP_JSLT_X:
1165 if (((s64) DST) < ((s64) SRC)) {
1166 insn += insn->off;
1167 CONT_JMP;
1168 }
1169 CONT;
1170 JMP_JSLT_K:
1171 if (((s64) DST) < ((s64) IMM)) {
1172 insn += insn->off;
1173 CONT_JMP;
1174 }
1175 CONT;
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001176 JMP_JSGE_X:
1177 if (((s64) DST) >= ((s64) SRC)) {
1178 insn += insn->off;
1179 CONT_JMP;
1180 }
1181 CONT;
1182 JMP_JSGE_K:
1183 if (((s64) DST) >= ((s64) IMM)) {
1184 insn += insn->off;
1185 CONT_JMP;
1186 }
1187 CONT;
Daniel Borkmann92b31a92017-08-10 01:39:55 +02001188 JMP_JSLE_X:
1189 if (((s64) DST) <= ((s64) SRC)) {
1190 insn += insn->off;
1191 CONT_JMP;
1192 }
1193 CONT;
1194 JMP_JSLE_K:
1195 if (((s64) DST) <= ((s64) IMM)) {
1196 insn += insn->off;
1197 CONT_JMP;
1198 }
1199 CONT;
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001200 JMP_JSET_X:
1201 if (DST & SRC) {
1202 insn += insn->off;
1203 CONT_JMP;
1204 }
1205 CONT;
1206 JMP_JSET_K:
1207 if (DST & IMM) {
1208 insn += insn->off;
1209 CONT_JMP;
1210 }
1211 CONT;
1212 JMP_EXIT:
1213 return BPF_R0;
1214
1215 /* STX and ST and LDX*/
1216#define LDST(SIZEOP, SIZE) \
1217 STX_MEM_##SIZEOP: \
1218 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
1219 CONT; \
1220 ST_MEM_##SIZEOP: \
1221 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
1222 CONT; \
1223 LDX_MEM_##SIZEOP: \
1224 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
1225 CONT;
1226
1227 LDST(B, u8)
1228 LDST(H, u16)
1229 LDST(W, u32)
1230 LDST(DW, u64)
1231#undef LDST
1232 STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1233 atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1234 (DST + insn->off));
1235 CONT;
1236 STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1237 atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1238 (DST + insn->off));
1239 CONT;
1240 LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
1241 off = IMM;
1242load_word:
Johannes Berg96a94cc2017-04-11 12:10:58 +02001243 /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only
1244 * appearing in the programs where ctx == skb
1245 * (see may_access_skb() in the verifier). All programs
1246 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
1247 * bpf_convert_filter() saves it in BPF_R6, internal BPF
1248 * verifier will check that BPF_R6 == ctx.
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001249 *
1250 * BPF_ABS and BPF_IND are wrappers of function calls,
1251 * so they scratch BPF_R1-BPF_R5 registers, preserve
1252 * BPF_R6-BPF_R9, and store return value into BPF_R0.
1253 *
1254 * Implicit input:
1255 * ctx == skb == BPF_R6 == CTX
1256 *
1257 * Explicit input:
1258 * SRC == any register
1259 * IMM == 32-bit immediate
1260 *
1261 * Output:
1262 * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
1263 */
1264
1265 ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
1266 if (likely(ptr != NULL)) {
1267 BPF_R0 = get_unaligned_be32(ptr);
1268 CONT;
1269 }
1270
1271 return 0;
1272 LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
1273 off = IMM;
1274load_half:
1275 ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
1276 if (likely(ptr != NULL)) {
1277 BPF_R0 = get_unaligned_be16(ptr);
1278 CONT;
1279 }
1280
1281 return 0;
1282 LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
1283 off = IMM;
1284load_byte:
1285 ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
1286 if (likely(ptr != NULL)) {
1287 BPF_R0 = *(u8 *)ptr;
1288 CONT;
1289 }
1290
1291 return 0;
1292 LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
1293 off = IMM + SRC;
1294 goto load_word;
1295 LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
1296 off = IMM + SRC;
1297 goto load_half;
1298 LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
1299 off = IMM + SRC;
1300 goto load_byte;
1301
1302 default_label:
1303 /* If we ever reach this, we have a bug somewhere. */
1304 WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
1305 return 0;
1306}
Alexei Starovoitovf696b8f2017-05-30 13:31:28 -07001307STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
1308
Alexei Starovoitovb870aa92017-05-30 13:31:33 -07001309#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1310#define DEFINE_BPF_PROG_RUN(stack_size) \
1311static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1312{ \
1313 u64 stack[stack_size / sizeof(u64)]; \
1314 u64 regs[MAX_BPF_REG]; \
1315\
1316 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1317 ARG1 = (u64) (unsigned long) ctx; \
1318 return ___bpf_prog_run(regs, insn, stack); \
Alexei Starovoitovf696b8f2017-05-30 13:31:28 -07001319}
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001320
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -08001321#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1322#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1323static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1324 const struct bpf_insn *insn) \
1325{ \
1326 u64 stack[stack_size / sizeof(u64)]; \
1327 u64 regs[MAX_BPF_REG]; \
1328\
1329 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1330 BPF_R1 = r1; \
1331 BPF_R2 = r2; \
1332 BPF_R3 = r3; \
1333 BPF_R4 = r4; \
1334 BPF_R5 = r5; \
1335 return ___bpf_prog_run(regs, insn, stack); \
1336}
1337
Alexei Starovoitovb870aa92017-05-30 13:31:33 -07001338#define EVAL1(FN, X) FN(X)
1339#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1340#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1341#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1342#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1343#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1344
1345EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1346EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1347EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1348
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -08001349EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1350EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1351EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1352
Alexei Starovoitovb870aa92017-05-30 13:31:33 -07001353#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1354
1355static unsigned int (*interpreters[])(const void *ctx,
1356 const struct bpf_insn *insn) = {
1357EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1358EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1359EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1360};
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -08001361#undef PROG_NAME_LIST
1362#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1363static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1364 const struct bpf_insn *insn) = {
1365EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1366EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1367EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1368};
1369#undef PROG_NAME_LIST
1370
1371void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1372{
1373 stack_depth = max_t(u32, stack_depth, 1);
1374 insn->off = (s16) insn->imm;
1375 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1376 __bpf_call_base_args;
1377 insn->code = BPF_JMP | BPF_CALL_ARGS;
1378}
Alexei Starovoitovb870aa92017-05-30 13:31:33 -07001379
Daniel Borkmann3324b582015-05-29 23:23:07 +02001380bool bpf_prog_array_compatible(struct bpf_array *array,
1381 const struct bpf_prog *fp)
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001382{
Josef Bacik9802d862017-12-11 11:36:48 -05001383 if (fp->kprobe_override)
1384 return false;
1385
Daniel Borkmann3324b582015-05-29 23:23:07 +02001386 if (!array->owner_prog_type) {
1387 /* There's no owner yet where we could check for
1388 * compatibility.
1389 */
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001390 array->owner_prog_type = fp->type;
1391 array->owner_jited = fp->jited;
Daniel Borkmann3324b582015-05-29 23:23:07 +02001392
1393 return true;
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001394 }
Daniel Borkmann3324b582015-05-29 23:23:07 +02001395
1396 return array->owner_prog_type == fp->type &&
1397 array->owner_jited == fp->jited;
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001398}
1399
Daniel Borkmann3324b582015-05-29 23:23:07 +02001400static int bpf_check_tail_call(const struct bpf_prog *fp)
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001401{
1402 struct bpf_prog_aux *aux = fp->aux;
1403 int i;
1404
1405 for (i = 0; i < aux->used_map_cnt; i++) {
Daniel Borkmann3324b582015-05-29 23:23:07 +02001406 struct bpf_map *map = aux->used_maps[i];
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001407 struct bpf_array *array;
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001408
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001409 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1410 continue;
Daniel Borkmann3324b582015-05-29 23:23:07 +02001411
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001412 array = container_of(map, struct bpf_array, map);
1413 if (!bpf_prog_array_compatible(array, fp))
1414 return -EINVAL;
1415 }
1416
1417 return 0;
1418}
1419
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001420/**
Daniel Borkmann3324b582015-05-29 23:23:07 +02001421 * bpf_prog_select_runtime - select exec runtime for BPF program
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -07001422 * @fp: bpf_prog populated with internal BPF program
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02001423 * @err: pointer to error variable
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001424 *
Daniel Borkmann3324b582015-05-29 23:23:07 +02001425 * Try to JIT eBPF program, if JIT is not available, use interpreter.
1426 * The BPF program will be executed via BPF_PROG_RUN() macro.
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001427 */
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02001428struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001429{
Martin KaFai Lau8007e402017-06-28 10:41:24 -07001430 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1431
1432 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001433
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02001434 /* eBPF JITs can rewrite the program in case constant
1435 * blinding is active. However, in case of error during
1436 * blinding, bpf_int_jit_compile() must always return a
1437 * valid program, which in this case would simply not
1438 * be JITed, but falls back to the interpreter.
1439 */
Jakub Kicinskiab3f0062017-11-03 13:56:17 -07001440 if (!bpf_prog_is_dev_bound(fp->aux)) {
1441 fp = bpf_int_jit_compile(fp);
1442 } else {
1443 *err = bpf_prog_offload_compile(fp);
1444 if (*err)
1445 return fp;
1446 }
Daniel Borkmann60a3b222014-09-02 22:53:44 +02001447 bpf_prog_lock_ro(fp);
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001448
Daniel Borkmann3324b582015-05-29 23:23:07 +02001449 /* The tail call compatibility check can only be done at
1450 * this late stage as we need to determine, if we deal
1451 * with JITed or non JITed program concatenations and not
1452 * all eBPF JITs might immediately support all features.
1453 */
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02001454 *err = bpf_check_tail_call(fp);
1455
1456 return fp;
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001457}
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -07001458EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001459
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001460static unsigned int __bpf_prog_ret1(const void *ctx,
1461 const struct bpf_insn *insn)
1462{
1463 return 1;
1464}
1465
1466static struct bpf_prog_dummy {
1467 struct bpf_prog prog;
1468} dummy_bpf_prog = {
1469 .prog = {
1470 .bpf_func = __bpf_prog_ret1,
1471 },
1472};
1473
Alexei Starovoitov324bda9e62017-10-02 22:50:21 -07001474/* to avoid allocating empty bpf_prog_array for cgroups that
1475 * don't have bpf program attached use one global 'empty_prog_array'
1476 * It will not be modified the caller of bpf_prog_array_alloc()
1477 * (since caller requested prog_cnt == 0)
1478 * that pointer should be 'freed' by bpf_prog_array_free()
1479 */
1480static struct {
1481 struct bpf_prog_array hdr;
1482 struct bpf_prog *null_prog;
1483} empty_prog_array = {
1484 .null_prog = NULL,
1485};
1486
1487struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1488{
1489 if (prog_cnt)
1490 return kzalloc(sizeof(struct bpf_prog_array) +
1491 sizeof(struct bpf_prog *) * (prog_cnt + 1),
1492 flags);
1493
1494 return &empty_prog_array.hdr;
1495}
1496
1497void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
1498{
1499 if (!progs ||
1500 progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
1501 return;
1502 kfree_rcu(progs, rcu);
1503}
1504
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001505int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
1506{
1507 struct bpf_prog **prog;
1508 u32 cnt = 0;
1509
1510 rcu_read_lock();
1511 prog = rcu_dereference(progs)->progs;
1512 for (; *prog; prog++)
Yonghong Songc8c088b2017-11-30 13:47:54 -08001513 if (*prog != &dummy_bpf_prog.prog)
1514 cnt++;
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001515 rcu_read_unlock();
1516 return cnt;
1517}
1518
1519int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
1520 __u32 __user *prog_ids, u32 cnt)
1521{
1522 struct bpf_prog **prog;
1523 u32 i = 0, id;
1524
1525 rcu_read_lock();
1526 prog = rcu_dereference(progs)->progs;
1527 for (; *prog; prog++) {
Yonghong Songf371b302017-12-11 11:39:02 -08001528 if (*prog == &dummy_bpf_prog.prog)
1529 continue;
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001530 id = (*prog)->aux->id;
1531 if (copy_to_user(prog_ids + i, &id, sizeof(id))) {
1532 rcu_read_unlock();
1533 return -EFAULT;
1534 }
1535 if (++i == cnt) {
1536 prog++;
1537 break;
1538 }
1539 }
1540 rcu_read_unlock();
1541 if (*prog)
1542 return -ENOSPC;
1543 return 0;
1544}
1545
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001546void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
1547 struct bpf_prog *old_prog)
1548{
1549 struct bpf_prog **prog = progs->progs;
1550
1551 for (; *prog; prog++)
1552 if (*prog == old_prog) {
1553 WRITE_ONCE(*prog, &dummy_bpf_prog.prog);
1554 break;
1555 }
1556}
1557
1558int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
1559 struct bpf_prog *exclude_prog,
1560 struct bpf_prog *include_prog,
1561 struct bpf_prog_array **new_array)
1562{
1563 int new_prog_cnt, carry_prog_cnt = 0;
1564 struct bpf_prog **existing_prog;
1565 struct bpf_prog_array *array;
1566 int new_prog_idx = 0;
1567
1568 /* Figure out how many existing progs we need to carry over to
1569 * the new array.
1570 */
1571 if (old_array) {
1572 existing_prog = old_array->progs;
1573 for (; *existing_prog; existing_prog++) {
1574 if (*existing_prog != exclude_prog &&
1575 *existing_prog != &dummy_bpf_prog.prog)
1576 carry_prog_cnt++;
1577 if (*existing_prog == include_prog)
1578 return -EEXIST;
1579 }
1580 }
1581
1582 /* How many progs (not NULL) will be in the new array? */
1583 new_prog_cnt = carry_prog_cnt;
1584 if (include_prog)
1585 new_prog_cnt += 1;
1586
1587 /* Do we have any prog (not NULL) in the new array? */
1588 if (!new_prog_cnt) {
1589 *new_array = NULL;
1590 return 0;
1591 }
1592
1593 /* +1 as the end of prog_array is marked with NULL */
1594 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
1595 if (!array)
1596 return -ENOMEM;
1597
1598 /* Fill in the new prog array */
1599 if (carry_prog_cnt) {
1600 existing_prog = old_array->progs;
1601 for (; *existing_prog; existing_prog++)
1602 if (*existing_prog != exclude_prog &&
1603 *existing_prog != &dummy_bpf_prog.prog)
1604 array->progs[new_prog_idx++] = *existing_prog;
1605 }
1606 if (include_prog)
1607 array->progs[new_prog_idx++] = include_prog;
1608 array->progs[new_prog_idx] = NULL;
1609 *new_array = array;
1610 return 0;
1611}
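/* Usage sketch (illustrative only; 'owner->prog_array' is an assumed
 * field). Attaching swaps in a newly built array under the owner's lock:
 *
 *	ret = bpf_prog_array_copy(old_array, NULL, new_prog, &new_array);
 *	if (ret < 0)
 *		return ret;
 *	rcu_assign_pointer(owner->prog_array, new_array);
 *	bpf_prog_array_free(old_array);
 *
 * Detaching passes the victim as exclude_prog and NULL as include_prog;
 * a resulting *new_array of NULL means the last real program was removed.
 */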
1612
Yonghong Songf371b302017-12-11 11:39:02 -08001613int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
1614 __u32 __user *prog_ids, u32 request_cnt,
1615 __u32 __user *prog_cnt)
1616{
1617 u32 cnt = 0;
1618
1619 if (array)
1620 cnt = bpf_prog_array_length(array);
1621
1622 if (copy_to_user(prog_cnt, &cnt, sizeof(cnt)))
1623 return -EFAULT;
1624
1625 /* return early if user requested only program count or nothing to copy */
1626 if (!request_cnt || !cnt)
1627 return 0;
1628
1629 return bpf_prog_array_copy_to_user(array, prog_ids, request_cnt);
1630}
1631
Daniel Borkmann60a3b222014-09-02 22:53:44 +02001632static void bpf_prog_free_deferred(struct work_struct *work)
1633{
Alexei Starovoitov09756af2014-09-26 00:17:00 -07001634 struct bpf_prog_aux *aux;
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -08001635 int i;
Daniel Borkmann60a3b222014-09-02 22:53:44 +02001636
Alexei Starovoitov09756af2014-09-26 00:17:00 -07001637 aux = container_of(work, struct bpf_prog_aux, work);
Jakub Kicinskiab3f0062017-11-03 13:56:17 -07001638 if (bpf_prog_is_dev_bound(aux))
1639 bpf_prog_offload_destroy(aux->prog);
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -08001640 for (i = 0; i < aux->func_cnt; i++)
1641 bpf_jit_free(aux->func[i]);
1642 if (aux->func_cnt) {
1643 kfree(aux->func);
1644 bpf_prog_unlock_free(aux->prog);
1645 } else {
1646 bpf_jit_free(aux->prog);
1647 }
Daniel Borkmann60a3b222014-09-02 22:53:44 +02001648}
1649
1650/* Free internal BPF program */
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -07001651void bpf_prog_free(struct bpf_prog *fp)
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001652{
Alexei Starovoitov09756af2014-09-26 00:17:00 -07001653 struct bpf_prog_aux *aux = fp->aux;
Daniel Borkmann60a3b222014-09-02 22:53:44 +02001654
Alexei Starovoitov09756af2014-09-26 00:17:00 -07001655 INIT_WORK(&aux->work, bpf_prog_free_deferred);
Alexei Starovoitov09756af2014-09-26 00:17:00 -07001656 schedule_work(&aux->work);
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001657}
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -07001658EXPORT_SYMBOL_GPL(bpf_prog_free);
Alexei Starovoitovf89b7752014-10-23 18:41:08 -07001659
Daniel Borkmann3ad00402015-10-08 01:20:39 +02001660/* RNG for unprivileged user space, with state separated from prandom_u32(). */
1661static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
1662
1663void bpf_user_rnd_init_once(void)
1664{
1665 prandom_init_once(&bpf_user_rnd_state);
1666}
1667
Daniel Borkmannf3694e02016-09-09 02:45:31 +02001668BPF_CALL_0(bpf_user_rnd_u32)
Daniel Borkmann3ad00402015-10-08 01:20:39 +02001669{
1670 /* Should someone ever have the rather unwise idea to use some
1671 * of the registers passed into this function, then note that
1672 * this function is called from native eBPF and classic-to-eBPF
1673 * transformations. Register assignments from both sides are
1674 * different, f.e. classic always sets fn(ctx, A, X) here.
1675 */
1676 struct rnd_state *state;
1677 u32 res;
1678
1679 state = &get_cpu_var(bpf_user_rnd_state);
1680 res = prandom_u32_state(state);
Shaohua Lib761fe22016-09-27 08:42:41 -07001681 put_cpu_var(bpf_user_rnd_state);
Daniel Borkmann3ad00402015-10-08 01:20:39 +02001682
1683 return res;
1684}
1685
Daniel Borkmann3ba67da2015-03-05 23:27:51 +01001686/* Weak definitions of helper functions in case we don't have bpf syscall. */
1687const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
1688const struct bpf_func_proto bpf_map_update_elem_proto __weak;
1689const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
1690
Daniel Borkmann03e69b52015-03-14 02:27:16 +01001691const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
Daniel Borkmannc04167c2015-03-14 02:27:17 +01001692const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
Daniel Borkmann2d0e30c2016-10-21 12:46:33 +02001693const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
Daniel Borkmann17ca8cb2015-05-29 23:23:06 +02001694const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
Daniel Borkmannbd570ff2016-04-18 21:01:24 +02001695
Alexei Starovoitovffeedaf2015-06-12 19:39:12 -07001696const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
1697const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
1698const struct bpf_func_proto bpf_get_current_comm_proto __weak;
John Fastabend6bdc9c42017-08-16 15:02:32 -07001699const struct bpf_func_proto bpf_sock_map_update_proto __weak;
Daniel Borkmannbd570ff2016-04-18 21:01:24 +02001700
Alexei Starovoitov0756ea32015-06-12 19:39:13 -07001701const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
1702{
1703 return NULL;
1704}
Daniel Borkmann03e69b52015-03-14 02:27:16 +01001705
Daniel Borkmann555c8a82016-07-14 18:08:05 +02001706u64 __weak
1707bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
1708 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
Daniel Borkmannbd570ff2016-04-18 21:01:24 +02001709{
Daniel Borkmann555c8a82016-07-14 18:08:05 +02001710 return -ENOTSUPP;
Daniel Borkmannbd570ff2016-04-18 21:01:24 +02001711}
1712
Daniel Borkmann3324b582015-05-29 23:23:07 +02001713/* Always built-in helper functions. */
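/* Note: .func is intentionally NULL. Tail calls are never dispatched
 * through an ordinary helper call; the interpreter and the JITs handle
 * them inline. The proto below exists so the verifier can type check
 * the helper's arguments.
 */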
1714const struct bpf_func_proto bpf_tail_call_proto = {
1715 .func = NULL,
1716 .gpl_only = false,
1717 .ret_type = RET_VOID,
1718 .arg1_type = ARG_PTR_TO_CTX,
1719 .arg2_type = ARG_CONST_MAP_PTR,
1720 .arg3_type = ARG_ANYTHING,
1721};
1722
Daniel Borkmann93831912017-02-16 22:24:49 +01001723/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
1724 * It is encouraged to implement bpf_int_jit_compile() instead, so that
1725 * eBPF and implicitly also cBPF can get JITed!
1726 */
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02001727struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
Daniel Borkmann3324b582015-05-29 23:23:07 +02001728{
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02001729 return prog;
Daniel Borkmann3324b582015-05-29 23:23:07 +02001730}
1731
Daniel Borkmann93831912017-02-16 22:24:49 +01001732/* Stub for JITs that support eBPF. All cBPF code gets transformed into
1733 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
1734 */
1735void __weak bpf_jit_compile(struct bpf_prog *prog)
1736{
1737}
1738
Martin KaFai Lau17bedab2016-12-07 15:53:11 -08001739bool __weak bpf_helper_changes_pkt_data(void *func)
Alexei Starovoitov969bf052016-05-05 19:49:10 -07001740{
1741 return false;
1742}
1743
Alexei Starovoitovf89b7752014-10-23 18:41:08 -07001744/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
1745 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
1746 */
1747int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
1748 int len)
1749{
1750 return -EFAULT;
1751}
Daniel Borkmanna67edbf2017-01-25 02:28:18 +01001752
1753/* All definitions of tracepoints related to BPF. */
1754#define CREATE_TRACE_POINTS
1755#include <linux/bpf_trace.h>
1756
1757EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
1758
Steven Rostedt (VMware)9185a612017-10-12 18:40:02 -04001759/* These are only used within the BPF_SYSCALL code */
1760#ifdef CONFIG_BPF_SYSCALL
Daniel Borkmanna67edbf2017-01-25 02:28:18 +01001761EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type);
1762EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu);
Steven Rostedt (VMware)9185a612017-10-12 18:40:02 -04001763#endif