/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}
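
/* Illustrative sketch (not part of the original file): a classic BPF
 * filter can use the special negative offsets to read relative to the
 * network or link layer header instead of the packet start, e.g.:
 *
 *	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9)
 *
 * loads the IP protocol byte at offset 9 into the network header. The
 * helper above resolves such offsets and bounds-checks the access.
 */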

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();

	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);
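
/* Usage sketch (illustrative only): callers size the allocation via
 * bpf_prog_size(), which accounts for the program header plus insns,
 * e.g.:
 *
 *	struct bpf_prog *fp = bpf_prog_alloc(bpf_prog_size(insn_cnt),
 *					     GFP_USER);
 *
 * The allocation is rounded up to whole pages, so the tail of the last
 * page may serve later in-place expansions (see bpf_prog_realloc()).
 */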

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages, delta;
	int ret;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	delta = pages - fp_old->pages;
	ret = __bpf_prog_charge(fp_old->aux->user, delta);
	if (ret)
		return NULL;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL) {
		__bpf_prog_uncharge(fp_old->aux->user, delta);
	} else {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}
int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA_DIGEST_WORDS];
	u32 ws[SHA_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fds for the digest calculation
	 * since they are unstable from the user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	todo   = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha_transform(digest, todo, ws);
		todo += SHA_MESSAGE_BYTES;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}
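
/* Note (illustrative, not in the original file): the block above hand-rolls
 * standard SHA-1 message padding. After the appended 0x80 terminator byte,
 * the 64-bit big-endian bit length of the message goes into the last 8
 * bytes of the final 64-byte block; if fewer than 8 bytes remain there, an
 * extra block is consumed. E.g. psize = 50 fits in one block, psize = 60
 * needs a second one.
 */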

static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
				u32 curr, const bool probe_pass)
{
	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
	s64 imm = insn->imm;

	if (curr < pos && curr + imm + 1 > pos)
		imm += delta;
	else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
		imm -= delta;
	if (imm < imm_min || imm > imm_max)
		return -ERANGE;
	if (!probe_pass)
		insn->imm = imm;
	return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
				u32 curr, const bool probe_pass)
{
	const s32 off_min = S16_MIN, off_max = S16_MAX;
	s32 off = insn->off;

	if (curr < pos && curr + off + 1 > pos)
		off += delta;
	else if (curr > pos + delta && curr + off + 1 <= pos + delta)
		off -= delta;
	if (off < off_min || off > off_max)
		return -ERANGE;
	if (!probe_pass)
		insn->off = off;
	return 0;
}

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
			    const bool probe_pass)
{
	u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
	struct bpf_insn *insn = prog->insnsi;
	int ret = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code;

		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
			i += delta + 1;
			insn++;
		}
		code = insn->code;
		if (BPF_CLASS(code) != BPF_JMP ||
		    BPF_OP(code) == BPF_EXIT)
			continue;
		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
						   probe_pass);
		} else {
			ret = bpf_adj_delta_to_off(insn, pos, delta, i,
						   probe_pass);
		}
		if (ret)
			break;
	}

	return ret;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    bpf_adj_branches(prog, off, insn_delta, true))
		return NULL;

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * the last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return NULL;

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	/* We are guaranteed to not fail at this point; otherwise the
	 * ship has sailed and there is no reverting to the original
	 * state anymore. An overflow cannot happen at this point.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));

	return prog_adj;
}
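
/* Worked example (illustrative, not in the original file): patching insn 2
 * of a 5-insn program with a 3-insn patchlet yields a 7-insn image:
 *
 *	before:  i0 i1 [i2] i3 i4
 *	after:   i0 i1 [p0 p1 p2] i3 i4
 *
 * A forward jump in i0 that targeted i3 now crosses the patch boundary,
 * so bpf_adj_branches() grows its offset by delta = len - 1 = 2.
 */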

void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
	int i;

	for (i = 0; i < fp->aux->func_cnt; i++)
		bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
	bpf_prog_kallsyms_del_subprogs(fp);
	bpf_prog_kallsyms_del(fp);
}

#ifdef CONFIG_BPF_JIT
# define BPF_JIT_LIMIT_DEFAULT	(PAGE_SIZE * 40000)

/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
int bpf_jit_harden   __read_mostly;
int bpf_jit_kallsyms __read_mostly;
int bpf_jit_limit    __read_mostly = BPF_JIT_LIMIT_DEFAULT;

static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
			 unsigned long *symbol_start,
			 unsigned long *symbol_end)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	*symbol_start = addr;
	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
}

static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
	const char *end = sym + KSYM_NAME_LEN;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We would need +1 for the '_' preceding
		      * the name. However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}
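
/* Illustrative example (not in the original file): the generated symbol is
 * "bpf_prog_" + 16 hex chars of the 8-byte program tag + optional "_<name>",
 * so a program named "xdp_pass" would show up in kallsyms roughly as
 *
 *	bpf_prog_fa8c2f1a932b9e42_xdp_pass
 *
 * (tag value here is hypothetical).
 */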

static __always_inline unsigned long
bpf_get_prog_addr_start(struct latch_tree_node *n)
{
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	return symbol_start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	if (val < symbol_start)
		return -1;
	if (val >= symbol_end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
{
	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
}

static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
{
	if (list_empty(&aux->ksym_lnode))
		return;

	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&aux->ksym_lnode);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym_lnode) ||
	       fp->aux->ksym_lnode.prev == LIST_POISON2;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !capable(CAP_SYS_ADMIN))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_add(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_del(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
	struct latch_tree_node *n;

	if (!bpf_jit_kallsyms_enabled())
		return NULL;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ?
	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
	       NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog *prog;
	char *ret = NULL;

	rcu_read_lock();
	prog = bpf_prog_kallsyms_find(addr);
	if (prog) {
		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(prog, sym);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_prog_kallsyms_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	struct bpf_prog_aux *aux;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
		if (it++ != symnum)
			continue;

		bpf_get_prog_name(aux->prog, sym);

		*value = (unsigned long)aux->prog->bpf_func;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

static atomic_long_t bpf_jit_current;

#if defined(MODULES_VADDR)
static int __init bpf_jit_charge_init(void)
{
	/* Only used as heuristic here to derive limit. */
	bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
					    PAGE_SIZE), INT_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);
#endif

static int bpf_jit_charge_modmem(u32 pages)
{
	if (atomic_long_add_return(pages, &bpf_jit_current) >
	    (bpf_jit_limit >> PAGE_SHIFT)) {
		if (!capable(CAP_SYS_ADMIN)) {
			atomic_long_sub(pages, &bpf_jit_current);
			return -EPERM;
		}
	}

	return 0;
}
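
/* Note (illustrative, not in the original file): the charge/uncharge pair
 * implements a global page budget for JIT images. An unprivileged load
 * that would push bpf_jit_current past bpf_jit_limit >> PAGE_SHIFT fails
 * with -EPERM, while CAP_SYS_ADMIN callers may exceed the limit. The
 * typical caller pattern is:
 *
 *	if (bpf_jit_charge_modmem(pages))
 *		return NULL;
 *	mem = module_alloc(size);
 *	if (!mem)
 *		bpf_jit_uncharge_modmem(pages);
 */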

static void bpf_jit_uncharge_modmem(u32 pages)
{
	atomic_long_sub(pages, &bpf_jit_current);
}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	u32 size, hole, start, pages;

	/* Most of BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	pages = size / PAGE_SIZE;

	if (bpf_jit_charge_modmem(pages))
		return NULL;
	hdr = module_alloc(size);
	if (!hdr) {
		bpf_jit_uncharge_modmem(pages);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = pages;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}
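
/* Memory layout sketch (illustrative, not in the original file):
 *
 *	+--------------------+ <- hdr (page aligned, from module_alloc())
 *	| bpf_binary_header  |
 *	+--------------------+
 *	| illegal insns      |  random 'start' bytes of the hole
 *	+--------------------+ <- *image_ptr (JITed code begins here)
 *	| JITed image        |
 *	+--------------------+
 *	| illegal insns      |  rest of the rounded-up allocation
 *	+--------------------+
 *
 * The random start offset hardens against JIT spraying; the illegal
 * instruction filler traps any stray jump into the unused regions.
 */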

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	u32 pages = hdr->pages;

	module_memfree(hdr);
	bpf_jit_uncharge_modmem(pages);
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_unlock_ro(hdr);
		bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed)
{
	s16 off = insn->off;
	s32 imm = insn->imm;
	u8 *addr;

	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
	if (!*func_addr_fixed) {
		/* Place-holder address till the last pass has collected
		 * all addresses for JITed subprograms in which case we
		 * can pick them up from prog->aux.
		 */
		if (!extra_pass)
			addr = NULL;
		else if (prog->aux->func &&
			 off >= 0 && off < prog->aux->func_cnt)
			addr = (u8 *)prog->aux->func[off]->bpf_func;
		else
			return -EINVAL;
	} else {
		/* Address of a BPF helper call. Since part of the core
		 * kernel, it's always at a fixed location. __bpf_call_base
		 * and the helper with imm relative to it are both in core
		 * kernel.
		 */
		addr = (u8 *)__bpf_call_base + imm;
	}

	*func_addr = (unsigned long)addr;
	return 0;
}
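
/* Illustrative note (not in the original file): for a fixed helper call,
 * the verifier stores insn->imm as the helper's offset from
 * __bpf_call_base, so the JIT recovers the absolute address as
 *
 *	addr = (u8 *)__bpf_call_base + insn->imm;
 *
 * For BPF-to-BPF calls (src_reg == BPF_PSEUDO_CALL), insn->off indexes
 * prog->aux->func[] instead, which is only populated by the final pass.
 */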

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JLT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JLE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Account for the extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}
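
/* Worked example (illustrative, not in the original file): with blinding
 * enabled, an attacker-controlled constant such as
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 0x12345678)
 *
 * is rewritten into an equivalent sequence whose immediates are useless
 * on their own:
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ 0x12345678)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd)
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_2,  BPF_REG_AX)
 *
 * Since imm_rnd is freshly drawn per instruction, the original constant
 * never appears verbatim in the JITed image.
 */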

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (!tmp) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return ERR_PTR(-ENOMEM);
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i += insn_delta;
	}

	clone->blinded = 1;
	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);
/* All UAPI available opcodes. */
#define BPF_INSN_MAP(INSN_2, INSN_3)	\
	/* 32 bit ALU operations. */	\
	/* Register based. */		\
	INSN_3(ALU, ADD,  X),		\
	INSN_3(ALU, SUB,  X),		\
	INSN_3(ALU, AND,  X),		\
	INSN_3(ALU, OR,   X),		\
	INSN_3(ALU, LSH,  X),		\
	INSN_3(ALU, RSH,  X),		\
	INSN_3(ALU, XOR,  X),		\
	INSN_3(ALU, MUL,  X),		\
	INSN_3(ALU, MOV,  X),		\
	INSN_3(ALU, DIV,  X),		\
	INSN_3(ALU, MOD,  X),		\
	INSN_2(ALU, NEG),		\
	INSN_3(ALU, END, TO_BE),	\
	INSN_3(ALU, END, TO_LE),	\
	/* Immediate based. */		\
	INSN_3(ALU, ADD,  K),		\
	INSN_3(ALU, SUB,  K),		\
	INSN_3(ALU, AND,  K),		\
	INSN_3(ALU, OR,   K),		\
	INSN_3(ALU, LSH,  K),		\
	INSN_3(ALU, RSH,  K),		\
	INSN_3(ALU, XOR,  K),		\
	INSN_3(ALU, MUL,  K),		\
	INSN_3(ALU, MOV,  K),		\
	INSN_3(ALU, DIV,  K),		\
	INSN_3(ALU, MOD,  K),		\
	/* 64 bit ALU operations. */	\
	/* Register based. */		\
	INSN_3(ALU64, ADD,  X),		\
	INSN_3(ALU64, SUB,  X),		\
	INSN_3(ALU64, AND,  X),		\
	INSN_3(ALU64, OR,   X),		\
	INSN_3(ALU64, LSH,  X),		\
	INSN_3(ALU64, RSH,  X),		\
	INSN_3(ALU64, XOR,  X),		\
	INSN_3(ALU64, MUL,  X),		\
	INSN_3(ALU64, MOV,  X),		\
	INSN_3(ALU64, ARSH, X),		\
	INSN_3(ALU64, DIV,  X),		\
	INSN_3(ALU64, MOD,  X),		\
	INSN_2(ALU64, NEG),		\
	/* Immediate based. */		\
	INSN_3(ALU64, ADD,  K),		\
	INSN_3(ALU64, SUB,  K),		\
	INSN_3(ALU64, AND,  K),		\
	INSN_3(ALU64, OR,   K),		\
	INSN_3(ALU64, LSH,  K),		\
	INSN_3(ALU64, RSH,  K),		\
	INSN_3(ALU64, XOR,  K),		\
	INSN_3(ALU64, MUL,  K),		\
	INSN_3(ALU64, MOV,  K),		\
	INSN_3(ALU64, ARSH, K),		\
	INSN_3(ALU64, DIV,  K),		\
	INSN_3(ALU64, MOD,  K),		\
	/* Call instruction. */		\
	INSN_2(JMP, CALL),		\
	/* Exit instruction. */		\
	INSN_2(JMP, EXIT),		\
	/* Jump instructions. */	\
	/* Register based. */		\
	INSN_3(JMP, JEQ,  X),		\
	INSN_3(JMP, JNE,  X),		\
	INSN_3(JMP, JGT,  X),		\
	INSN_3(JMP, JLT,  X),		\
	INSN_3(JMP, JGE,  X),		\
	INSN_3(JMP, JLE,  X),		\
	INSN_3(JMP, JSGT, X),		\
	INSN_3(JMP, JSLT, X),		\
	INSN_3(JMP, JSGE, X),		\
	INSN_3(JMP, JSLE, X),		\
	INSN_3(JMP, JSET, X),		\
	/* Immediate based. */		\
	INSN_3(JMP, JEQ,  K),		\
	INSN_3(JMP, JNE,  K),		\
	INSN_3(JMP, JGT,  K),		\
	INSN_3(JMP, JLT,  K),		\
	INSN_3(JMP, JGE,  K),		\
	INSN_3(JMP, JLE,  K),		\
	INSN_3(JMP, JSGT, K),		\
	INSN_3(JMP, JSLT, K),		\
	INSN_3(JMP, JSGE, K),		\
	INSN_3(JMP, JSLE, K),		\
	INSN_3(JMP, JSET, K),		\
	INSN_2(JMP, JA),		\
	/* Store instructions. */	\
	/* Register based. */		\
	INSN_3(STX, MEM,  B),		\
	INSN_3(STX, MEM,  H),		\
	INSN_3(STX, MEM,  W),		\
	INSN_3(STX, MEM,  DW),		\
	INSN_3(STX, XADD, W),		\
	INSN_3(STX, XADD, DW),		\
	/* Immediate based. */		\
	INSN_3(ST, MEM, B),		\
	INSN_3(ST, MEM, H),		\
	INSN_3(ST, MEM, W),		\
	INSN_3(ST, MEM, DW),		\
	/* Load instructions. */	\
	/* Register based. */		\
	INSN_3(LDX, MEM, B),		\
	INSN_3(LDX, MEM, H),		\
	INSN_3(LDX, MEM, W),		\
	INSN_3(LDX, MEM, DW),		\
	/* Immediate based. */		\
	INSN_3(LD, IMM, DW)

bool bpf_opcode_in_insntable(u8 code)
{
#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
	static const bool public_insntable[256] = {
		[0 ... 255] = false,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
		[BPF_LD | BPF_ABS | BPF_B] = true,
		[BPF_LD | BPF_ABS | BPF_H] = true,
		[BPF_LD | BPF_ABS | BPF_W] = true,
		[BPF_LD | BPF_IND | BPF_B] = true,
		[BPF_LD | BPF_IND | BPF_H] = true,
		[BPF_LD | BPF_IND | BPF_W] = true,
	};
#undef BPF_INSN_3_TBL
#undef BPF_INSN_2_TBL
	return public_insntable[code];
}
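
/* Usage sketch (illustrative, not in the original file): the verifier can
 * reject unknown opcodes up front via this table, e.g.:
 *
 *	if (!bpf_opcode_in_insntable(insn->code))
 *		return -EINVAL;
 *
 * The BPF_INSN_MAP() x-macro expands once into this boolean table and
 * once into the interpreter's jump table below, keeping both in sync.
 */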

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/**
 *	__bpf_prog_run - run eBPF program on a given context
 *	@ctx: is the data we are operating on
 *	@insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 */
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
	u64 tmp;
#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
		/* Non-UAPI available opcodes. */
		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
	};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
	u32 tail_call_cnt = 0;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		div64_u64_rem(DST, SRC, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_X:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_CALL_ARGS:
		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
							    BPF_R3, BPF_R4,
							    BPF_R5,
							    insn + insn->off + 1);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u32 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLT_X:
		if (DST < SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLT_K:
		if (DST < IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLE_X:
		if (DST <= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JLE_K:
		if (DST <= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLT_X:
		if (((s64) DST) < ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLT_K:
		if (((s64) DST) < ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLE_X:
		if (((s64) DST) <= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSLE_K:
		if (((s64) DST) <= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;

	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;

	default_label:
		/* If we ever reach this, we have a bug somewhere. Die hard here
		 * instead of just returning 0; we could be somewhere in a subprog,
		 * so execution could continue otherwise which we do /not/ want.
		 *
		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
		 */
		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
		BUG_ON(1);
		return 0;
}
STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
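
/* Dispatch sketch (illustrative, not in the original file): the interpreter
 * uses GCC's computed-goto extension instead of a switch, so each handler
 * ends by jumping straight to the next opcode's label:
 *
 *	goto *jumptable[insn->code];
 *
 * This replicates the indirect branch at every handler (via CONT), which
 * tends to help the CPU's branch predictor compared to funneling all
 * opcodes through a single switch dispatch point.
 */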

#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
#define DEFINE_BPF_PROG_RUN(stack_size) \
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	ARG1 = (u64) (unsigned long) ctx; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
				      const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	BPF_R1 = r1; \
	BPF_R2 = r2; \
	BPF_R3 = r3; \
	BPF_R4 = r4; \
	BPF_R5 = r5; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)

EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);

EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);

#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),

static unsigned int (*interpreters[])(const void *ctx,
				      const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
				  const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST

void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
{
	stack_depth = max_t(u32, stack_depth, 1);
	insn->off = (s16) insn->imm;
	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
		__bpf_call_base_args;
	insn->code = BPF_JMP | BPF_CALL_ARGS;
}
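
/* Selection sketch (illustrative, not in the original file): the EVAL
 * macros stamp out 16 interpreter variants with fixed stack sizes of
 * 32..512 bytes in 32-byte steps. A program whose verifier-computed
 * stack_depth is, say, 72 bytes rounds up to 96 and picks
 *
 *	interpreters[(round_up(72, 32) / 32) - 1] == __bpf_prog_run96
 *
 * so the interpreter only reserves as much on-stack scratch as needed.
 */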
Alexei Starovoitovb870aa92017-05-30 13:31:33 -07001473
Alexei Starovoitov290af862018-01-09 10:04:29 -08001474#else
Daniel Borkmannfa9dd592018-01-20 01:24:33 +01001475static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1476 const struct bpf_insn *insn)
Alexei Starovoitov290af862018-01-09 10:04:29 -08001477{
Daniel Borkmannfa9dd592018-01-20 01:24:33 +01001478 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1479 * is not working properly, so warn about it!
1480 */
1481 WARN_ON_ONCE(1);
Alexei Starovoitov290af862018-01-09 10:04:29 -08001482 return 0;
1483}
1484#endif
1485
Daniel Borkmann3324b582015-05-29 23:23:07 +02001486bool bpf_prog_array_compatible(struct bpf_array *array,
1487 const struct bpf_prog *fp)
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001488{
Josef Bacik9802d862017-12-11 11:36:48 -05001489 if (fp->kprobe_override)
1490 return false;
1491
Daniel Borkmann3324b582015-05-29 23:23:07 +02001492 if (!array->owner_prog_type) {
1493 /* There's no owner yet where we could check for
1494 * compatibility.
1495 */
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001496 array->owner_prog_type = fp->type;
1497 array->owner_jited = fp->jited;
Daniel Borkmann3324b582015-05-29 23:23:07 +02001498
1499 return true;
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001500 }
Daniel Borkmann3324b582015-05-29 23:23:07 +02001501
1502 return array->owner_prog_type == fp->type &&
1503 array->owner_jited == fp->jited;
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001504}
1505
Daniel Borkmann3324b582015-05-29 23:23:07 +02001506static int bpf_check_tail_call(const struct bpf_prog *fp)
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001507{
1508 struct bpf_prog_aux *aux = fp->aux;
1509 int i;
1510
1511 for (i = 0; i < aux->used_map_cnt; i++) {
Daniel Borkmann3324b582015-05-29 23:23:07 +02001512 struct bpf_map *map = aux->used_maps[i];
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001513 struct bpf_array *array;
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001514
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001515 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1516 continue;
Daniel Borkmann3324b582015-05-29 23:23:07 +02001517
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001518 array = container_of(map, struct bpf_array, map);
1519 if (!bpf_prog_array_compatible(array, fp))
1520 return -EINVAL;
1521 }
1522
1523 return 0;
1524}
1525
Daniel Borkmann9facc332018-06-15 02:30:48 +02001526static void bpf_prog_select_func(struct bpf_prog *fp)
1527{
1528#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1529 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1530
1531 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1532#else
1533 fp->bpf_func = __bpf_prog_ret0_warn;
1534#endif
1535}
1536
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001537/**
Daniel Borkmann3324b582015-05-29 23:23:07 +02001538 * bpf_prog_select_runtime - select exec runtime for BPF program
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -07001539 * @fp: bpf_prog populated with internal BPF program
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02001540 * @err: pointer to error variable
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001541 *
Daniel Borkmann3324b582015-05-29 23:23:07 +02001542 * Try to JIT eBPF program, if JIT is not available, use interpreter.
1543 * The BPF program will be executed via BPF_PROG_RUN() macro.
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001544 */
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02001545struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001546{
Daniel Borkmann9facc332018-06-15 02:30:48 +02001547 /* In case of BPF to BPF calls, verifier did all the prep
1548 * work with regards to JITing, etc.
1549 */
1550 if (fp->bpf_func)
1551 goto finalize;
Martin KaFai Lau8007e402017-06-28 10:41:24 -07001552
Daniel Borkmann9facc332018-06-15 02:30:48 +02001553 bpf_prog_select_func(fp);
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001554
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02001555 /* eBPF JITs can rewrite the program in case constant
1556 * blinding is active. However, in case of error during
1557 * blinding, bpf_int_jit_compile() must always return a
1558 * valid program, which in this case would simply not
1559 * be JITed, but falls back to the interpreter.
1560 */
Jakub Kicinskiab3f0062017-11-03 13:56:17 -07001561 if (!bpf_prog_is_dev_bound(fp->aux)) {
1562 fp = bpf_int_jit_compile(fp);
Alexei Starovoitov290af862018-01-09 10:04:29 -08001563#ifdef CONFIG_BPF_JIT_ALWAYS_ON
1564 if (!fp->jited) {
1565 *err = -ENOTSUPP;
1566 return fp;
1567 }
1568#endif
Jakub Kicinskiab3f0062017-11-03 13:56:17 -07001569 } else {
1570 *err = bpf_prog_offload_compile(fp);
1571 if (*err)
1572 return fp;
1573 }
Daniel Borkmann9facc332018-06-15 02:30:48 +02001574
1575finalize:
Daniel Borkmann60a3b222014-09-02 22:53:44 +02001576 bpf_prog_lock_ro(fp);
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001577
Daniel Borkmann3324b582015-05-29 23:23:07 +02001578 /* The tail call compatibility check can only be done at
1579 * this late stage as we need to determine, if we deal
1580 * with JITed or non JITed program concatenations and not
1581 * all eBPF JITs might immediately support all features.
1582 */
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02001583 *err = bpf_check_tail_call(fp);
1584
1585 return fp;
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001586}
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -07001587EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
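/* Minimal caller sketch (mirrors the syscall load path; the error label
 * is illustrative, not from this file):
 *
 *	fp = bpf_prog_select_runtime(fp, &err);
 *	if (err < 0)
 *		goto free_used_maps;
 */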
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001588
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001589static unsigned int __bpf_prog_ret1(const void *ctx,
1590 const struct bpf_insn *insn)
1591{
1592 return 1;
1593}
1594
1595static struct bpf_prog_dummy {
1596 struct bpf_prog prog;
1597} dummy_bpf_prog = {
1598 .prog = {
1599 .bpf_func = __bpf_prog_ret1,
1600 },
1601};
1602
Alexei Starovoitov324bda9e62017-10-02 22:50:21 -07001603/* To avoid allocating an empty bpf_prog_array for cgroups that
1604 * don't have a bpf program attached, use one global 'empty_prog_array'.
1605 * It will not be modified by the caller of bpf_prog_array_alloc()
1606 * (since the caller requested prog_cnt == 0), but that pointer
1607 * should still be 'freed' via bpf_prog_array_free().
1608 */
1609static struct {
1610 struct bpf_prog_array hdr;
1611 struct bpf_prog *null_prog;
1612} empty_prog_array = {
1613 .null_prog = NULL,
1614};
1615
Roman Gushchind29ab6e2018-07-13 12:41:10 -07001616struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
Alexei Starovoitov324bda9e62017-10-02 22:50:21 -07001617{
1618 if (prog_cnt)
1619 return kzalloc(sizeof(struct bpf_prog_array) +
Roman Gushchin394e40a2018-08-02 14:27:21 -07001620 sizeof(struct bpf_prog_array_item) *
1621 (prog_cnt + 1),
Alexei Starovoitov324bda9e62017-10-02 22:50:21 -07001622 flags);
1623
1624 return &empty_prog_array.hdr;
1625}
1626
1627void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
1628{
1629 if (!progs ||
1630 progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
1631 return;
1632 kfree_rcu(progs, rcu);
1633}
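/* Alloc/free pairing sketch (illustrative): requesting zero programs
 * returns the shared &empty_prog_array.hdr, which bpf_prog_array_free()
 * recognizes and leaves alone instead of calling kfree_rcu():
 *
 *	struct bpf_prog_array *arr;
 *
 *	arr = bpf_prog_array_alloc(0, GFP_KERNEL);
 *	...
 *	bpf_prog_array_free(arr);
 */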
1634
Roman Gushchin394e40a2018-08-02 14:27:21 -07001635int bpf_prog_array_length(struct bpf_prog_array __rcu *array)
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001636{
Roman Gushchin394e40a2018-08-02 14:27:21 -07001637 struct bpf_prog_array_item *item;
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001638 u32 cnt = 0;
1639
1640 rcu_read_lock();
Roman Gushchin394e40a2018-08-02 14:27:21 -07001641 item = rcu_dereference(array)->items;
1642 for (; item->prog; item++)
1643 if (item->prog != &dummy_bpf_prog.prog)
Yonghong Songc8c088b2017-11-30 13:47:54 -08001644 cnt++;
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001645 rcu_read_unlock();
1646 return cnt;
1647}
1648
1650static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
Yonghong Song3a38bb92018-04-10 09:37:32 -07001651 u32 *prog_ids,
1652 u32 request_cnt)
1653{
Roman Gushchin394e40a2018-08-02 14:27:21 -07001654 struct bpf_prog_array_item *item;
Yonghong Song3a38bb92018-04-10 09:37:32 -07001655 int i = 0;
1656
Yonghong Song965931e2018-08-14 11:01:12 -07001657 item = rcu_dereference_check(array, 1)->items;
Roman Gushchin394e40a2018-08-02 14:27:21 -07001658 for (; item->prog; item++) {
1659 if (item->prog == &dummy_bpf_prog.prog)
Yonghong Song3a38bb92018-04-10 09:37:32 -07001660 continue;
Roman Gushchin394e40a2018-08-02 14:27:21 -07001661 prog_ids[i] = item->prog->aux->id;
Yonghong Song3a38bb92018-04-10 09:37:32 -07001662 if (++i == request_cnt) {
Roman Gushchin394e40a2018-08-02 14:27:21 -07001663 item++;
Yonghong Song3a38bb92018-04-10 09:37:32 -07001664 break;
1665 }
1666 }
1667
Roman Gushchin394e40a2018-08-02 14:27:21 -07001668 return !!(item->prog);
Yonghong Song3a38bb92018-04-10 09:37:32 -07001669}
1670
Roman Gushchin394e40a2018-08-02 14:27:21 -07001671int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001672 __u32 __user *prog_ids, u32 cnt)
1673{
Alexei Starovoitov09112872018-02-02 15:14:05 -08001674 unsigned long err = 0;
Alexei Starovoitov09112872018-02-02 15:14:05 -08001675 bool nospc;
Yonghong Song3a38bb92018-04-10 09:37:32 -07001676 u32 *ids;
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001677
Alexei Starovoitov09112872018-02-02 15:14:05 -08001678 /* Users of this function are doing:
1679 * cnt = bpf_prog_array_length();
1680 * if (cnt > 0)
1681 * bpf_prog_array_copy_to_user(..., cnt);
1682 * so the kcalloc below doesn't need an extra cnt > 0 check. But
1683 * bpf_prog_array_length() releases the RCU lock, and the prog
1684 * array could have been swapped with an empty or a larger array
1685 * in between, so always copy 'cnt' prog_ids to the user.
1686 * In a rare race the user will see zero prog_ids.
1687 */
Daniel Borkmann9c481b92018-02-14 15:31:00 +01001688 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
Alexei Starovoitov09112872018-02-02 15:14:05 -08001689 if (!ids)
1690 return -ENOMEM;
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001691 rcu_read_lock();
Roman Gushchin394e40a2018-08-02 14:27:21 -07001692 nospc = bpf_prog_array_copy_core(array, ids, cnt);
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001693 rcu_read_unlock();
Alexei Starovoitov09112872018-02-02 15:14:05 -08001694 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1695 kfree(ids);
1696 if (err)
1697 return -EFAULT;
1698 if (nospc)
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001699 return -ENOSPC;
1700 return 0;
1701}
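/* Caller pattern sketch matching the comment above (names illustrative):
 *
 *	cnt = bpf_prog_array_length(progs);
 *	if (cnt > 0)
 *		ret = bpf_prog_array_copy_to_user(progs, user_prog_ids, cnt);
 */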
1702
Roman Gushchin394e40a2018-08-02 14:27:21 -07001703void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001704 struct bpf_prog *old_prog)
1705{
Roman Gushchin394e40a2018-08-02 14:27:21 -07001706 struct bpf_prog_array_item *item = array->items;
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001707
Roman Gushchin394e40a2018-08-02 14:27:21 -07001708 for (; item->prog; item++)
1709 if (item->prog == old_prog) {
1710 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001711 break;
1712 }
1713}
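/* Note on the WRITE_ONCE() above: a NULL prog terminates the array, so a
 * deleted entry is replaced with dummy_bpf_prog (which simply returns 1)
 * rather than NULL; concurrent RCU readers can keep walking the array
 * without it having to be reallocated.
 */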
1714
1715int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
1716 struct bpf_prog *exclude_prog,
1717 struct bpf_prog *include_prog,
1718 struct bpf_prog_array **new_array)
1719{
1720 int new_prog_cnt, carry_prog_cnt = 0;
Roman Gushchin394e40a2018-08-02 14:27:21 -07001721 struct bpf_prog_array_item *existing;
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001722 struct bpf_prog_array *array;
Sean Young170a7e32018-05-27 12:24:08 +01001723 bool found_exclude = false;
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001724 int new_prog_idx = 0;
1725
1726 /* Figure out how many existing progs we need to carry over to
1727 * the new array.
1728 */
1729 if (old_array) {
Roman Gushchin394e40a2018-08-02 14:27:21 -07001730 existing = old_array->items;
1731 for (; existing->prog; existing++) {
1732 if (existing->prog == exclude_prog) {
Sean Young170a7e32018-05-27 12:24:08 +01001733 found_exclude = true;
1734 continue;
1735 }
Roman Gushchin394e40a2018-08-02 14:27:21 -07001736 if (existing->prog != &dummy_bpf_prog.prog)
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001737 carry_prog_cnt++;
Roman Gushchin394e40a2018-08-02 14:27:21 -07001738 if (existing->prog == include_prog)
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001739 return -EEXIST;
1740 }
1741 }
1742
Sean Young170a7e32018-05-27 12:24:08 +01001743 if (exclude_prog && !found_exclude)
1744 return -ENOENT;
1745
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001746 /* How many progs (not NULL) will be in the new array? */
1747 new_prog_cnt = carry_prog_cnt;
1748 if (include_prog)
1749 new_prog_cnt += 1;
1750
1751 /* Do we have any prog (not NULL) in the new array? */
1752 if (!new_prog_cnt) {
1753 *new_array = NULL;
1754 return 0;
1755 }
1756
1757 /* +1 as the end of prog_array is marked with NULL */
1758 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
1759 if (!array)
1760 return -ENOMEM;
1761
1762 /* Fill in the new prog array */
1763 if (carry_prog_cnt) {
Roman Gushchin394e40a2018-08-02 14:27:21 -07001764 existing = old_array->items;
1765 for (; existing->prog; existing++)
1766 if (existing->prog != exclude_prog &&
1767 existing->prog != &dummy_bpf_prog.prog) {
1768 array->items[new_prog_idx++].prog =
1769 existing->prog;
1770 }
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001771 }
1772 if (include_prog)
Roman Gushchin394e40a2018-08-02 14:27:21 -07001773 array->items[new_prog_idx++].prog = include_prog;
1774 array->items[new_prog_idx].prog = NULL;
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001775 *new_array = array;
1776 return 0;
1777}
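/* Replace-on-update sketch built on the helper above (locking and error
 * paths elided; the 'event' naming is illustrative, not from this file):
 *
 *	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
 *	if (ret < 0)
 *		return ret;
 *	rcu_assign_pointer(event->prog_array, new_array);
 *	bpf_prog_array_free(old_array);
 */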
1778
Yonghong Songf371b302017-12-11 11:39:02 -08001779int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
Yonghong Song3a38bb92018-04-10 09:37:32 -07001780 u32 *prog_ids, u32 request_cnt,
1781 u32 *prog_cnt)
Yonghong Songf371b302017-12-11 11:39:02 -08001782{
1783 u32 cnt = 0;
1784
1785 if (array)
1786 cnt = bpf_prog_array_length(array);
1787
Yonghong Song3a38bb92018-04-10 09:37:32 -07001788 *prog_cnt = cnt;
Yonghong Songf371b302017-12-11 11:39:02 -08001789
1790 /* return early if user requested only program count or nothing to copy */
1791 if (!request_cnt || !cnt)
1792 return 0;
1793
Yonghong Song3a38bb92018-04-10 09:37:32 -07001794 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
Roman Gushchin394e40a2018-08-02 14:27:21 -07001795 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
Yonghong Song3a38bb92018-04-10 09:37:32 -07001796 : 0;
Yonghong Songf371b302017-12-11 11:39:02 -08001797}
1798
Daniel Borkmann60a3b222014-09-02 22:53:44 +02001799static void bpf_prog_free_deferred(struct work_struct *work)
1800{
Alexei Starovoitov09756af2014-09-26 00:17:00 -07001801 struct bpf_prog_aux *aux;
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -08001802 int i;
Daniel Borkmann60a3b222014-09-02 22:53:44 +02001803
Alexei Starovoitov09756af2014-09-26 00:17:00 -07001804 aux = container_of(work, struct bpf_prog_aux, work);
Jakub Kicinskiab3f0062017-11-03 13:56:17 -07001805 if (bpf_prog_is_dev_bound(aux))
1806 bpf_prog_offload_destroy(aux->prog);
Yonghong Songc195651e2018-04-28 22:28:08 -07001807#ifdef CONFIG_PERF_EVENTS
1808 if (aux->prog->has_callchain_buf)
1809 put_callchain_buffers();
1810#endif
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -08001811 for (i = 0; i < aux->func_cnt; i++)
1812 bpf_jit_free(aux->func[i]);
1813 if (aux->func_cnt) {
1814 kfree(aux->func);
1815 bpf_prog_unlock_free(aux->prog);
1816 } else {
1817 bpf_jit_free(aux->prog);
1818 }
Daniel Borkmann60a3b222014-09-02 22:53:44 +02001819}
1820
1821/* Free internal BPF program */
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -07001822void bpf_prog_free(struct bpf_prog *fp)
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001823{
Alexei Starovoitov09756af2014-09-26 00:17:00 -07001824 struct bpf_prog_aux *aux = fp->aux;
Daniel Borkmann60a3b222014-09-02 22:53:44 +02001825
Alexei Starovoitov09756af2014-09-26 00:17:00 -07001826 INIT_WORK(&aux->work, bpf_prog_free_deferred);
Alexei Starovoitov09756af2014-09-26 00:17:00 -07001827 schedule_work(&aux->work);
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001828}
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -07001829EXPORT_SYMBOL_GPL(bpf_prog_free);
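/* The actual teardown is pushed to a workqueue above because callers may
 * invoke bpf_prog_free() from contexts (e.g. RCU callbacks) that must not
 * sleep, while freeing JIT images and vmalloc'ed program areas can.
 */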
Alexei Starovoitovf89b7752014-10-23 18:41:08 -07001830
Daniel Borkmann3ad00402015-10-08 01:20:39 +02001831/* RNG for unprivileged user space, with state separated from prandom_u32(). */
1832static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
1833
1834void bpf_user_rnd_init_once(void)
1835{
1836 prandom_init_once(&bpf_user_rnd_state);
1837}
1838
Daniel Borkmannf3694e02016-09-09 02:45:31 +02001839BPF_CALL_0(bpf_user_rnd_u32)
Daniel Borkmann3ad00402015-10-08 01:20:39 +02001840{
1841 /* Should someone ever have the rather unwise idea to use some
1842 * of the registers passed into this function, then note that
1843 * this function is called from native eBPF and classic-to-eBPF
1844 * transformations. Register assignments from both sides are
1845 * different, f.e. classic always sets fn(ctx, A, X) here.
1846 */
1847 struct rnd_state *state;
1848 u32 res;
1849
1850 state = &get_cpu_var(bpf_user_rnd_state);
1851 res = prandom_u32_state(state);
Shaohua Lib761fe22016-09-27 08:42:41 -07001852 put_cpu_var(bpf_user_rnd_state);
Daniel Borkmann3ad00402015-10-08 01:20:39 +02001853
1854 return res;
1855}
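/* Sketch of how this function is wired up as a helper elsewhere (a
 * plausible proto, not a verbatim copy from this file):
 *
 *	const struct bpf_func_proto bpf_get_prandom_u32_proto = {
 *		.func		= bpf_user_rnd_u32,
 *		.gpl_only	= false,
 *		.ret_type	= RET_INTEGER,
 *	};
 */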
1856
Daniel Borkmann3ba67da2015-03-05 23:27:51 +01001857/* Weak definitions of helper functions in case we don't have bpf syscall. */
1858const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
1859const struct bpf_func_proto bpf_map_update_elem_proto __weak;
1860const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
Mauricio Vasquez Bf1a2e442018-10-18 15:16:25 +02001861const struct bpf_func_proto bpf_map_push_elem_proto __weak;
1862const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
1863const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
Daniel Borkmann3ba67da2015-03-05 23:27:51 +01001864
Daniel Borkmann03e69b52015-03-14 02:27:16 +01001865const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
Daniel Borkmannc04167c2015-03-14 02:27:17 +01001866const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
Daniel Borkmann2d0e30c2016-10-21 12:46:33 +02001867const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
Daniel Borkmann17ca8cb2015-05-29 23:23:06 +02001868const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
Daniel Borkmannbd570ff2016-04-18 21:01:24 +02001869
Alexei Starovoitovffeedaf2015-06-12 19:39:12 -07001870const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
1871const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
1872const struct bpf_func_proto bpf_get_current_comm_proto __weak;
Yonghong Songbf6fa2c82018-06-03 15:59:41 -07001873const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
Roman Gushchincd339432018-08-02 14:27:24 -07001874const struct bpf_func_proto bpf_get_local_storage_proto __weak;
Daniel Borkmannbd570ff2016-04-18 21:01:24 +02001875
Alexei Starovoitov0756ea32015-06-12 19:39:13 -07001876const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
1877{
1878 return NULL;
1879}
Daniel Borkmann03e69b52015-03-14 02:27:16 +01001880
Daniel Borkmann555c8a82016-07-14 18:08:05 +02001881u64 __weak
1882bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
1883 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
Daniel Borkmannbd570ff2016-04-18 21:01:24 +02001884{
Daniel Borkmann555c8a82016-07-14 18:08:05 +02001885 return -ENOTSUPP;
Daniel Borkmannbd570ff2016-04-18 21:01:24 +02001886}
Jakub Kicinski6cb5fb32018-05-03 18:37:10 -07001887EXPORT_SYMBOL_GPL(bpf_event_output);
Daniel Borkmannbd570ff2016-04-18 21:01:24 +02001888
Daniel Borkmann3324b582015-05-29 23:23:07 +02001889/* Helper functions that are always built in. */
1890const struct bpf_func_proto bpf_tail_call_proto = {
1891 .func = NULL,
1892 .gpl_only = false,
1893 .ret_type = RET_VOID,
1894 .arg1_type = ARG_PTR_TO_CTX,
1895 .arg2_type = ARG_CONST_MAP_PTR,
1896 .arg3_type = ARG_ANYTHING,
1897};
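/* .func above is intentionally NULL: tail calls are not dispatched as an
 * ordinary helper call; the interpreter and the JITs implement them
 * inline, and the proto only describes the argument types for the
 * verifier.
 */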
1898
Daniel Borkmann93831912017-02-16 22:24:49 +01001899/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
1900 * It is encouraged to implement bpf_int_jit_compile() instead, so that
1901 * eBPF and implicitly also cBPF can get JITed!
1902 */
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02001903struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
Daniel Borkmann3324b582015-05-29 23:23:07 +02001904{
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02001905 return prog;
Daniel Borkmann3324b582015-05-29 23:23:07 +02001906}
1907
Daniel Borkmann93831912017-02-16 22:24:49 +01001908/* Stub for JITs that support eBPF. All cBPF code gets transformed into
1909 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
1910 */
1911void __weak bpf_jit_compile(struct bpf_prog *prog)
1912{
1913}
1914
Martin KaFai Lau17bedab2016-12-07 15:53:11 -08001915bool __weak bpf_helper_changes_pkt_data(void *func)
Alexei Starovoitov969bf052016-05-05 19:49:10 -07001916{
1917 return false;
1918}
1919
Alexei Starovoitovf89b7752014-10-23 18:41:08 -07001920/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
1921 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
1922 */
1923int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
1924 int len)
1925{
1926 return -EFAULT;
1927}
Daniel Borkmanna67edbf2017-01-25 02:28:18 +01001928
1929/* All definitions of tracepoints related to BPF. */
1930#define CREATE_TRACE_POINTS
1931#include <linux/bpf_trace.h>
1932
1933EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);