/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define AX	regs[BPF_REG_AX]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();

	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{
	if (!prog->aux->nr_linfo || !prog->jit_requested)
		return 0;

	prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
					 sizeof(*prog->aux->jited_linfo),
					 GFP_KERNEL | __GFP_NOWARN);
	if (!prog->aux->jited_linfo)
		return -ENOMEM;

	return 0;
}

void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
{
	kfree(prog->aux->jited_linfo);
	prog->aux->jited_linfo = NULL;
}

void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
{
	if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
		bpf_prog_free_jited_linfo(prog);
}

/* The jit engine is responsible to provide an array
 * for insn_off to the jited_off mapping (insn_to_jit_off).
 *
 * The idx to this array is the insn_off. Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte off to the last byte of the jited insn.
 *
 * Hence, with
 * insn_start:
 *	The first bpf insn off of the prog. The insn off
 *	here is relative to the main prog.
 *	e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *	The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
			       const u32 *insn_to_jit_off)
{
	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
	const struct bpf_line_info *linfo;
	void **jited_linfo;

	if (!prog->aux->jited_linfo)
		/* Userspace did not provide linfo */
		return;

	linfo_idx = prog->aux->linfo_idx;
	linfo = &prog->aux->linfo[linfo_idx];
	insn_start = linfo[0].insn_off;
	insn_end = insn_start + prog->len;

	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
	jited_linfo[0] = prog->bpf_func;

	nr_linfo = prog->aux->nr_linfo - linfo_idx;

	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
		/* The verifier ensures that linfo[i].insn_off is
		 * strictly increasing
		 */
		jited_linfo[i] = prog->bpf_func +
			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}
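
/* Worked example of the mapping above (hypothetical numbers, not taken
 * from a real program): assume a subprog whose first insn sits at
 * insn_off 10 in the main prog (insn_start == 10) and whose line info
 * entries cover insn_off 10, 12 and 15. Then:
 *
 *	jited_linfo[0] = prog->bpf_func
 *	jited_linfo[1] = prog->bpf_func + insn_to_jit_off[12 - 10 - 1]
 *	jited_linfo[2] = prog->bpf_func + insn_to_jit_off[15 - 10 - 1]
 *
 * i.e. each entry points at the end of the jited image of the insn that
 * precedes the line info target, which is roughly where the jited code
 * for that source line begins.
 */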

void bpf_prog_free_linfo(struct bpf_prog *prog)
{
	bpf_prog_free_jited_linfo(prog);
	kvfree(prog->aux->linfo);
}

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages, delta;
	int ret;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	delta = pages - fp_old->pages;
	ret = __bpf_prog_charge(fp_old->aux->user, delta);
	if (ret)
		return NULL;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL) {
		__bpf_prog_uncharge(fp_old->aux->user, delta);
	} else {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA_DIGEST_WORDS];
	u32 ws[SHA_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fd for the digest calculation
	 * since they are unstable from user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	todo   = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha_transform(digest, todo, ws);
		todo += SHA_MESSAGE_BYTES;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}
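
/* Sketch of the effect above (hypothetical insn stream): a map reference
 * loaded as a BPF_LD | BPF_IMM | BPF_DW pair with src_reg set to
 * BPF_PSEUDO_MAP_FD carries a file descriptor number in its imm fields.
 * Both halves of that pair are zeroed in the scratch copy before hashing,
 * so two loads of the same program that happen to receive different map
 * fds still end up with the same tag. The tag is what later shows up in
 * the kallsyms name built by bpf_get_prog_name() below.
 */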

static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
				u32 curr, const bool probe_pass)
{
	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
	s64 imm = insn->imm;

	if (curr < pos && curr + imm + 1 > pos)
		imm += delta;
	else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
		imm -= delta;
	if (imm < imm_min || imm > imm_max)
		return -ERANGE;
	if (!probe_pass)
		insn->imm = imm;
	return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
				u32 curr, const bool probe_pass)
{
	const s32 off_min = S16_MIN, off_max = S16_MAX;
	s32 off = insn->off;

	if (curr < pos && curr + off + 1 > pos)
		off += delta;
	else if (curr > pos + delta && curr + off + 1 <= pos + delta)
		off -= delta;
	if (off < off_min || off > off_max)
		return -ERANGE;
	if (!probe_pass)
		insn->off = off;
	return 0;
}
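
/* Worked example for the two helpers above (hypothetical offsets): a
 * patchlet of delta == 3 insns is inserted at pos == 10. A forward jump
 * at curr == 5 with off == +7 used to target insn 13 (5 + 7 + 1); that
 * target moved 3 insns down, so off becomes +10. A backward jump that now
 * sits at curr == 20 with off == -15 targets insn 3, which did not move,
 * while the jump itself did, so off becomes -18. Jumps whose source and
 * target are on the same side of the patchlet are left untouched. The
 * same arithmetic is applied to the imm of BPF_PSEUDO_CALL insns by
 * bpf_adj_delta_to_imm().
 */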

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
			    const bool probe_pass)
{
	u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
	struct bpf_insn *insn = prog->insnsi;
	int ret = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code;

		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
			i += delta + 1;
			insn++;
		}
		code = insn->code;
		if (BPF_CLASS(code) != BPF_JMP ||
		    BPF_OP(code) == BPF_EXIT)
			continue;
		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
						   probe_pass);
		} else {
			ret = bpf_adj_delta_to_off(insn, pos, delta, i,
						   probe_pass);
		}
		if (ret)
			break;
	}

	return ret;
}

static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
{
	struct bpf_line_info *linfo;
	u32 i, nr_linfo;

	nr_linfo = prog->aux->nr_linfo;
	if (!nr_linfo || !delta)
		return;

	linfo = prog->aux->linfo;

	for (i = 0; i < nr_linfo; i++)
		if (off < linfo[i].insn_off)
			break;

	/* Push all off < linfo[i].insn_off by delta */
	for (; i < nr_linfo; i++)
		linfo[i].insn_off += delta;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    bpf_adj_branches(prog, off, insn_delta, true))
		return NULL;

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return NULL;

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	/* We are guaranteed to not fail at this point, otherwise
	 * the ship has sailed to reverse to the original state. An
	 * overflow cannot happen at this point.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));

	bpf_adj_linfo(prog_adj, off, insn_delta);

	return prog_adj;
}
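
/* Illustration of the three steps (hypothetical sizes): patching insn 4
 * of an 8 insn program with a 3 insn patchlet (len == 3, insn_delta == 2)
 * yields a 10 insn program laid out as:
 *
 *	insns 0..3	unchanged
 *	insns 4..6	the patchlet
 *	insns 7..9	former insns 5..7, moved by the memmove() above
 *
 * after which bpf_adj_branches() fixes every jump or pseudo call that
 * crosses the patched region and bpf_adj_linfo() shifts line info
 * offsets that point behind it.
 */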

void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
	int i;

	for (i = 0; i < fp->aux->func_cnt; i++)
		bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
	bpf_prog_kallsyms_del_subprogs(fp);
	bpf_prog_kallsyms_del(fp);
}

#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
int bpf_jit_harden   __read_mostly;
int bpf_jit_kallsyms __read_mostly;
long bpf_jit_limit   __read_mostly;

static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
			 unsigned long *symbol_start,
			 unsigned long *symbol_end)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	*symbol_start = addr;
	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
}

static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
	const char *end = sym + KSYM_NAME_LEN;
	const struct btf_type *type;
	const char *func_name;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We should need +1 for the '_' preceding
		      * the name. However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));

	/* prog->aux->name will be ignored if full btf name is available */
	if (prog->aux->func_info_cnt) {
		type = btf_type_by_id(prog->aux->btf,
				      prog->aux->func_info[prog->aux->func_idx].type_id);
		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
		return;
	}

	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}
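
/* The resulting kallsyms entry is "bpf_prog_" followed by the 8 byte tag
 * printed as 16 hex characters, plus an optional suffix. With a made-up
 * tag value purely for illustration:
 *
 *	bpf_prog_6deef7357e7b4530		no name or BTF available
 *	bpf_prog_6deef7357e7b4530_my_func	BTF func name or aux->name
 */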

static __always_inline unsigned long
bpf_get_prog_addr_start(struct latch_tree_node *n)
{
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	return symbol_start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	if (val < symbol_start)
		return -1;
	if (val >= symbol_end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
{
	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
}

static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
{
	if (list_empty(&aux->ksym_lnode))
		return;

	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&aux->ksym_lnode);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym_lnode) ||
	       fp->aux->ksym_lnode.prev == LIST_POISON2;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !capable(CAP_SYS_ADMIN))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_add(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_del(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
	struct latch_tree_node *n;

	if (!bpf_jit_kallsyms_enabled())
		return NULL;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ?
	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
	       NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog *prog;
	char *ret = NULL;

	rcu_read_lock();
	prog = bpf_prog_kallsyms_find(addr);
	if (prog) {
		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(prog, sym);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_prog_kallsyms_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	struct bpf_prog_aux *aux;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
		if (it++ != symnum)
			continue;

		bpf_get_prog_name(aux->prog, sym);

		*value = (unsigned long)aux->prog->bpf_func;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
	return MODULES_END - MODULES_VADDR;
#else
	return VMALLOC_END - VMALLOC_START;
#endif
}

static int __init bpf_jit_charge_init(void)
{
	/* Only used as heuristic here to derive limit. */
	bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
					    PAGE_SIZE), LONG_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);
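
/* Numerical sketch of the heuristic above: with a hypothetical 1 GiB
 * region reported by bpf_jit_alloc_exec_limit(), the charge limit becomes
 * round_up(1 GiB >> 2, PAGE_SIZE) == 256 MiB, i.e. unprivileged JIT
 * allocations may consume at most a quarter of the backend memory area;
 * CAP_SYS_ADMIN may exceed it, see bpf_jit_charge_modmem() below.
 */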

static int bpf_jit_charge_modmem(u32 pages)
{
	if (atomic_long_add_return(pages, &bpf_jit_current) >
	    (bpf_jit_limit >> PAGE_SHIFT)) {
		if (!capable(CAP_SYS_ADMIN)) {
			atomic_long_sub(pages, &bpf_jit_current);
			return -EPERM;
		}
	}

	return 0;
}

static void bpf_jit_uncharge_modmem(u32 pages)
{
	atomic_long_sub(pages, &bpf_jit_current);
}

void *__weak bpf_jit_alloc_exec(unsigned long size)
{
	return module_alloc(size);
}

void __weak bpf_jit_free_exec(void *addr)
{
	module_memfree(addr);
}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	u32 size, hole, start, pages;

	/* Most of BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	pages = size / PAGE_SIZE;

	if (bpf_jit_charge_modmem(pages))
		return NULL;
	hdr = bpf_jit_alloc_exec(size);
	if (!hdr) {
		bpf_jit_uncharge_modmem(pages);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = pages;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}
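
/* Resulting image layout, with example numbers (assuming 4 KiB pages and
 * a hypothetical proglen of 4000 bytes, which rounds the allocation up to
 * two pages):
 *
 *	[ bpf_binary_header | hole (random start) | JITed image | slack ]
 *
 * *image_ptr points at the JITed image inside hdr->image[], and every
 * byte outside the proglen bytes stays filled with the architecture's
 * illegal instructions from bpf_fill_ill_insns().
 */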

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	u32 pages = hdr->pages;

	bpf_jit_free_exec(hdr);
	bpf_jit_uncharge_modmem(pages);
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_unlock_ro(hdr);
		bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed)
{
	s16 off = insn->off;
	s32 imm = insn->imm;
	u8 *addr;

	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
	if (!*func_addr_fixed) {
		/* Place-holder address till the last pass has collected
		 * all addresses for JITed subprograms in which case we
		 * can pick them up from prog->aux.
		 */
		if (!extra_pass)
			addr = NULL;
		else if (prog->aux->func &&
			 off >= 0 && off < prog->aux->func_cnt)
			addr = (u8 *)prog->aux->func[off]->bpf_func;
		else
			return -EINVAL;
	} else {
		/* Address of a BPF helper call. Since part of the core
		 * kernel, it's always at a fixed location. __bpf_call_base
		 * and the helper with imm relative to it are both in core
		 * kernel.
		 */
		addr = (u8 *)__bpf_call_base + imm;
	}

	*func_addr = (unsigned long)addr;
	return 0;
}

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	/* Constraints on AX register:
	 *
	 * AX register is inaccessible from user space. It is mapped in
	 * all JITs, and used here for constant blinding rewrites. It is
	 * typically "stateless" meaning its contents are only valid within
	 * the executed instruction, but not across several instructions.
	 * There are a few exceptions however which are further detailed
	 * below.
	 *
	 * Constant blinding is only used by JITs, not in the interpreter.
	 * The interpreter uses AX in some occasions as a local temporary
	 * register e.g. in DIV or MOD instructions.
	 *
	 * In restricted circumstances, the verifier can also use the AX
	 * register for rewrites as long as they do not interfere with
	 * the above cases!
	 */
	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
		goto out;

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JLT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JLE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate for extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}
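
/* Blinding example (imm_rnd is picked at random at rewrite time; the
 * value below is made up): the single insn
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 42)
 *
 * is emitted as
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x1234abcd ^ 42)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0x1234abcd)
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1,  BPF_REG_AX)
 *
 * so the user-controlled constant 42 never appears verbatim in the JITed
 * image, while AX still ends up holding the original immediate.
 */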

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (!tmp) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return ERR_PTR(-ENOMEM);
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i        += insn_delta;
	}

	clone->blinded = 1;
	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

/* All UAPI available opcodes. */
#define BPF_INSN_MAP(INSN_2, INSN_3)	\
	/* 32 bit ALU operations. */	\
	/* Register based. */		\
	INSN_3(ALU, ADD, X),		\
	INSN_3(ALU, SUB, X),		\
	INSN_3(ALU, AND, X),		\
	INSN_3(ALU, OR, X),		\
	INSN_3(ALU, LSH, X),		\
	INSN_3(ALU, RSH, X),		\
	INSN_3(ALU, XOR, X),		\
	INSN_3(ALU, MUL, X),		\
	INSN_3(ALU, MOV, X),		\
	INSN_3(ALU, ARSH, X),		\
	INSN_3(ALU, DIV, X),		\
	INSN_3(ALU, MOD, X),		\
	INSN_2(ALU, NEG),		\
	INSN_3(ALU, END, TO_BE),	\
	INSN_3(ALU, END, TO_LE),	\
	/* Immediate based. */		\
	INSN_3(ALU, ADD, K),		\
	INSN_3(ALU, SUB, K),		\
	INSN_3(ALU, AND, K),		\
	INSN_3(ALU, OR, K),		\
	INSN_3(ALU, LSH, K),		\
	INSN_3(ALU, RSH, K),		\
	INSN_3(ALU, XOR, K),		\
	INSN_3(ALU, MUL, K),		\
	INSN_3(ALU, MOV, K),		\
	INSN_3(ALU, ARSH, K),		\
	INSN_3(ALU, DIV, K),		\
	INSN_3(ALU, MOD, K),		\
	/* 64 bit ALU operations. */	\
	/* Register based. */		\
	INSN_3(ALU64, ADD, X),		\
	INSN_3(ALU64, SUB, X),		\
	INSN_3(ALU64, AND, X),		\
	INSN_3(ALU64, OR, X),		\
	INSN_3(ALU64, LSH, X),		\
	INSN_3(ALU64, RSH, X),		\
	INSN_3(ALU64, XOR, X),		\
	INSN_3(ALU64, MUL, X),		\
	INSN_3(ALU64, MOV, X),		\
	INSN_3(ALU64, ARSH, X),		\
	INSN_3(ALU64, DIV, X),		\
	INSN_3(ALU64, MOD, X),		\
	INSN_2(ALU64, NEG),		\
	/* Immediate based. */		\
	INSN_3(ALU64, ADD, K),		\
	INSN_3(ALU64, SUB, K),		\
	INSN_3(ALU64, AND, K),		\
	INSN_3(ALU64, OR, K),		\
	INSN_3(ALU64, LSH, K),		\
	INSN_3(ALU64, RSH, K),		\
	INSN_3(ALU64, XOR, K),		\
	INSN_3(ALU64, MUL, K),		\
	INSN_3(ALU64, MOV, K),		\
	INSN_3(ALU64, ARSH, K),		\
	INSN_3(ALU64, DIV, K),		\
	INSN_3(ALU64, MOD, K),		\
	/* Call instruction. */		\
	INSN_2(JMP, CALL),		\
	/* Exit instruction. */		\
	INSN_2(JMP, EXIT),		\
	/* Jump instructions. */	\
	/* Register based. */		\
	INSN_3(JMP, JEQ, X),		\
	INSN_3(JMP, JNE, X),		\
	INSN_3(JMP, JGT, X),		\
	INSN_3(JMP, JLT, X),		\
	INSN_3(JMP, JGE, X),		\
	INSN_3(JMP, JLE, X),		\
	INSN_3(JMP, JSGT, X),		\
	INSN_3(JMP, JSLT, X),		\
	INSN_3(JMP, JSGE, X),		\
	INSN_3(JMP, JSLE, X),		\
	INSN_3(JMP, JSET, X),		\
	/* Immediate based. */		\
	INSN_3(JMP, JEQ, K),		\
	INSN_3(JMP, JNE, K),		\
	INSN_3(JMP, JGT, K),		\
	INSN_3(JMP, JLT, K),		\
	INSN_3(JMP, JGE, K),		\
	INSN_3(JMP, JLE, K),		\
	INSN_3(JMP, JSGT, K),		\
	INSN_3(JMP, JSLT, K),		\
	INSN_3(JMP, JSGE, K),		\
	INSN_3(JMP, JSLE, K),		\
	INSN_3(JMP, JSET, K),		\
	INSN_2(JMP, JA),		\
	/* Store instructions. */	\
	/* Register based. */		\
	INSN_3(STX, MEM, B),		\
	INSN_3(STX, MEM, H),		\
	INSN_3(STX, MEM, W),		\
	INSN_3(STX, MEM, DW),		\
	INSN_3(STX, XADD, W),		\
	INSN_3(STX, XADD, DW),		\
	/* Immediate based. */		\
	INSN_3(ST, MEM, B),		\
	INSN_3(ST, MEM, H),		\
	INSN_3(ST, MEM, W),		\
	INSN_3(ST, MEM, DW),		\
	/* Load instructions. */	\
	/* Register based. */		\
	INSN_3(LDX, MEM, B),		\
	INSN_3(LDX, MEM, H),		\
	INSN_3(LDX, MEM, W),		\
	INSN_3(LDX, MEM, DW),		\
	/* Immediate based. */		\
	INSN_3(LD, IMM, DW)

bool bpf_opcode_in_insntable(u8 code)
{
#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
	static const bool public_insntable[256] = {
		[0 ... 255] = false,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
		[BPF_LD | BPF_ABS | BPF_B] = true,
		[BPF_LD | BPF_ABS | BPF_H] = true,
		[BPF_LD | BPF_ABS | BPF_W] = true,
		[BPF_LD | BPF_IND | BPF_B] = true,
		[BPF_LD | BPF_IND | BPF_H] = true,
		[BPF_LD | BPF_IND | BPF_W] = true,
	};
#undef BPF_INSN_3_TBL
#undef BPF_INSN_2_TBL
	return public_insntable[code];
}

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/**
 *	__bpf_prog_run - run eBPF program on a given context
 *	@ctx: is the data we are operating on
 *	@insn: is the array of eBPF instructions
 *
 *	Decode and execute eBPF instructions.
 */
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
		/* Non-UAPI available opcodes. */
		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
	};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
	u32 tail_call_cnt = 0;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
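	/* For reference, ALU(ADD, +) above expands (modulo whitespace) to:
	 *
	 *	ALU64_ADD_X: DST = DST + SRC; CONT;
	 *	ALU_ADD_X:   DST = (u32) DST + (u32) SRC; CONT;
	 *	ALU64_ADD_K: DST = DST + IMM; CONT;
	 *	ALU_ADD_K:   DST = (u32) DST + (u32) IMM; CONT;
	 *
	 * i.e. one jump table label per opcode variant, where the 32 bit
	 * flavours truncate to u32 and the 64 bit ones use the full register.
	 */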
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU_ARSH_X:
		DST = (u64) (u32) ((*(s32 *) &DST) >> SRC);
		CONT;
	ALU_ARSH_K:
		DST = (u64) (u32) ((*(s32 *) &DST) >> IMM);
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		div64_u64_rem(DST, SRC, &AX);
		DST = AX;
		CONT;
	ALU_MOD_X:
		AX = (u32) DST;
		DST = do_div(AX, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &AX);
		DST = AX;
		CONT;
	ALU_MOD_K:
		AX = (u32) DST;
		DST = do_div(AX, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		AX = (u32) DST;
		do_div(AX, (u32) SRC);
		DST = (u32) AX;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		AX = (u32) DST;
		do_div(AX, (u32) IMM);
		DST = (u32) AX;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_CALL_ARGS:
		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
							    BPF_R3, BPF_R4,
							    BPF_R5,
							    insn + insn->off + 1);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u32 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001388 insn = prog->insnsi;
1389 goto select_insn;
1390out:
1391 CONT;
1392 }
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001393 /* JMP */
1394 JMP_JA:
1395 insn += insn->off;
1396 CONT;
1397 JMP_JEQ_X:
1398 if (DST == SRC) {
1399 insn += insn->off;
1400 CONT_JMP;
1401 }
1402 CONT;
1403 JMP_JEQ_K:
1404 if (DST == IMM) {
1405 insn += insn->off;
1406 CONT_JMP;
1407 }
1408 CONT;
1409 JMP_JNE_X:
1410 if (DST != SRC) {
1411 insn += insn->off;
1412 CONT_JMP;
1413 }
1414 CONT;
1415 JMP_JNE_K:
1416 if (DST != IMM) {
1417 insn += insn->off;
1418 CONT_JMP;
1419 }
1420 CONT;
1421 JMP_JGT_X:
1422 if (DST > SRC) {
1423 insn += insn->off;
1424 CONT_JMP;
1425 }
1426 CONT;
1427 JMP_JGT_K:
1428 if (DST > IMM) {
1429 insn += insn->off;
1430 CONT_JMP;
1431 }
1432 CONT;
Daniel Borkmann92b31a92017-08-10 01:39:55 +02001433 JMP_JLT_X:
1434 if (DST < SRC) {
1435 insn += insn->off;
1436 CONT_JMP;
1437 }
1438 CONT;
1439 JMP_JLT_K:
1440 if (DST < IMM) {
1441 insn += insn->off;
1442 CONT_JMP;
1443 }
1444 CONT;
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001445 JMP_JGE_X:
1446 if (DST >= SRC) {
1447 insn += insn->off;
1448 CONT_JMP;
1449 }
1450 CONT;
1451 JMP_JGE_K:
1452 if (DST >= IMM) {
1453 insn += insn->off;
1454 CONT_JMP;
1455 }
1456 CONT;
Daniel Borkmann92b31a92017-08-10 01:39:55 +02001457 JMP_JLE_X:
1458 if (DST <= SRC) {
1459 insn += insn->off;
1460 CONT_JMP;
1461 }
1462 CONT;
1463 JMP_JLE_K:
1464 if (DST <= IMM) {
1465 insn += insn->off;
1466 CONT_JMP;
1467 }
1468 CONT;
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001469 JMP_JSGT_X:
1470 if (((s64) DST) > ((s64) SRC)) {
1471 insn += insn->off;
1472 CONT_JMP;
1473 }
1474 CONT;
1475 JMP_JSGT_K:
1476 if (((s64) DST) > ((s64) IMM)) {
1477 insn += insn->off;
1478 CONT_JMP;
1479 }
1480 CONT;
Daniel Borkmann92b31a92017-08-10 01:39:55 +02001481 JMP_JSLT_X:
1482 if (((s64) DST) < ((s64) SRC)) {
1483 insn += insn->off;
1484 CONT_JMP;
1485 }
1486 CONT;
1487 JMP_JSLT_K:
1488 if (((s64) DST) < ((s64) IMM)) {
1489 insn += insn->off;
1490 CONT_JMP;
1491 }
1492 CONT;
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001493 JMP_JSGE_X:
1494 if (((s64) DST) >= ((s64) SRC)) {
1495 insn += insn->off;
1496 CONT_JMP;
1497 }
1498 CONT;
1499 JMP_JSGE_K:
1500 if (((s64) DST) >= ((s64) IMM)) {
1501 insn += insn->off;
1502 CONT_JMP;
1503 }
1504 CONT;
Daniel Borkmann92b31a92017-08-10 01:39:55 +02001505 JMP_JSLE_X:
1506 if (((s64) DST) <= ((s64) SRC)) {
1507 insn += insn->off;
1508 CONT_JMP;
1509 }
1510 CONT;
1511 JMP_JSLE_K:
1512 if (((s64) DST) <= ((s64) IMM)) {
1513 insn += insn->off;
1514 CONT_JMP;
1515 }
1516 CONT;
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001517 JMP_JSET_X:
1518 if (DST & SRC) {
1519 insn += insn->off;
1520 CONT_JMP;
1521 }
1522 CONT;
1523 JMP_JSET_K:
1524 if (DST & IMM) {
1525 insn += insn->off;
1526 CONT_JMP;
1527 }
1528 CONT;
1529 JMP_EXIT:
1530 return BPF_R0;
1531
1532 /* STX and ST and LDX*/
1533#define LDST(SIZEOP, SIZE) \
1534 STX_MEM_##SIZEOP: \
1535 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
1536 CONT; \
1537 ST_MEM_##SIZEOP: \
1538 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
1539 CONT; \
1540 LDX_MEM_##SIZEOP: \
1541 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
1542 CONT;
1543
1544 LDST(B, u8)
1545 LDST(H, u16)
1546 LDST(W, u32)
1547 LDST(DW, u64)
1548#undef LDST
1549 STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1550 atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1551 (DST + insn->off));
1552 CONT;
1553 STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1554 atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1555 (DST + insn->off));
1556 CONT;
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001557
1558 default_label:
Daniel Borkmann5e581da2018-01-26 23:33:38 +01001559 /* If we ever reach this, we have a bug somewhere. Die hard here
1560 * instead of just returning 0; we could be somewhere in a subprog,
1561 * so execution could continue otherwise which we do /not/ want.
1562 *
1563 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1564 */
1565 pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1566 BUG_ON(1);
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001567 return 0;
1568}
Alexei Starovoitovf696b8f2017-05-30 13:31:28 -07001569STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
1570
Alexei Starovoitovb870aa92017-05-30 13:31:33 -07001571#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1572#define DEFINE_BPF_PROG_RUN(stack_size) \
1573static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1574{ \
1575 u64 stack[stack_size / sizeof(u64)]; \
Daniel Borkmann144cd912019-01-03 00:58:28 +01001576 u64 regs[MAX_BPF_EXT_REG]; \
Alexei Starovoitovb870aa92017-05-30 13:31:33 -07001577\
1578 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1579 ARG1 = (u64) (unsigned long) ctx; \
1580 return ___bpf_prog_run(regs, insn, stack); \
Alexei Starovoitovf696b8f2017-05-30 13:31:28 -07001581}
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001582
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -08001583#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1584#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1585static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1586 const struct bpf_insn *insn) \
1587{ \
1588 u64 stack[stack_size / sizeof(u64)]; \
Daniel Borkmann144cd912019-01-03 00:58:28 +01001589 u64 regs[MAX_BPF_EXT_REG]; \
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -08001590\
1591 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1592 BPF_R1 = r1; \
1593 BPF_R2 = r2; \
1594 BPF_R3 = r3; \
1595 BPF_R4 = r4; \
1596 BPF_R5 = r5; \
1597 return ___bpf_prog_run(regs, insn, stack); \
1598}
1599
Alexei Starovoitovb870aa92017-05-30 13:31:33 -07001600#define EVAL1(FN, X) FN(X)
1601#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1602#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1603#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1604#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1605#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
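/* As an example, EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192)
 * below expands to DEFINE_BPF_PROG_RUN(32) DEFINE_BPF_PROG_RUN(64) ...
 * DEFINE_BPF_PROG_RUN(192), i.e. one __bpf_prog_run<stack_size> variant
 * per listed stack size.
 */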
1606
1607EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1608EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1609EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1610
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -08001611EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1612EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1613EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1614
Alexei Starovoitovb870aa92017-05-30 13:31:33 -07001615#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1616
1617static unsigned int (*interpreters[])(const void *ctx,
1618 const struct bpf_insn *insn) = {
1619EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1620EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1621EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1622};
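/* This table (and interpreters_args[] below) is indexed by
 * (round_up(stack_depth, 32) / 32) - 1, i.e. one slot per 32-byte stack
 * size bucket from 32 up to MAX_BPF_STACK (512) bytes; see
 * bpf_prog_select_func() and bpf_patch_call_args().
 */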
Alexei Starovoitov1ea47e02017-12-14 17:55:13 -08001623#undef PROG_NAME_LIST
1624#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1625static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1626 const struct bpf_insn *insn) = {
1627EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1628EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1629EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1630};
1631#undef PROG_NAME_LIST
1632
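/* Rewrite a bpf-to-bpf pseudo call so the interpreter can dispatch it:
 * the callee's insn offset moves from imm into off, and imm is repointed
 * at the __bpf_prog_run_args<stack_size> variant matching the callee's
 * stack depth, expressed relative to __bpf_call_base_args.
 */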
1633void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1634{
1635 stack_depth = max_t(u32, stack_depth, 1);
1636 insn->off = (s16) insn->imm;
1637 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1638 __bpf_call_base_args;
1639 insn->code = BPF_JMP | BPF_CALL_ARGS;
1640}
Alexei Starovoitovb870aa92017-05-30 13:31:33 -07001641
Alexei Starovoitov290af862018-01-09 10:04:29 -08001642#else
Daniel Borkmannfa9dd592018-01-20 01:24:33 +01001643static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1644 const struct bpf_insn *insn)
Alexei Starovoitov290af862018-01-09 10:04:29 -08001645{
Daniel Borkmannfa9dd592018-01-20 01:24:33 +01001646 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1647 * is not working properly, so warn about it!
1648 */
1649 WARN_ON_ONCE(1);
Alexei Starovoitov290af862018-01-09 10:04:29 -08001650 return 0;
1651}
1652#endif
1653
Daniel Borkmann3324b582015-05-29 23:23:07 +02001654bool bpf_prog_array_compatible(struct bpf_array *array,
1655 const struct bpf_prog *fp)
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001656{
Josef Bacik9802d862017-12-11 11:36:48 -05001657 if (fp->kprobe_override)
1658 return false;
1659
Daniel Borkmann3324b582015-05-29 23:23:07 +02001660 if (!array->owner_prog_type) {
1661 /* There's no owner yet where we could check for
1662 * compatibility.
1663 */
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001664 array->owner_prog_type = fp->type;
1665 array->owner_jited = fp->jited;
Daniel Borkmann3324b582015-05-29 23:23:07 +02001666
1667 return true;
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001668 }
Daniel Borkmann3324b582015-05-29 23:23:07 +02001669
1670 return array->owner_prog_type == fp->type &&
1671 array->owner_jited == fp->jited;
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001672}
1673
Daniel Borkmann3324b582015-05-29 23:23:07 +02001674static int bpf_check_tail_call(const struct bpf_prog *fp)
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001675{
1676 struct bpf_prog_aux *aux = fp->aux;
1677 int i;
1678
1679 for (i = 0; i < aux->used_map_cnt; i++) {
Daniel Borkmann3324b582015-05-29 23:23:07 +02001680 struct bpf_map *map = aux->used_maps[i];
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001681 struct bpf_array *array;
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001682
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001683 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1684 continue;
Daniel Borkmann3324b582015-05-29 23:23:07 +02001685
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001686 array = container_of(map, struct bpf_array, map);
1687 if (!bpf_prog_array_compatible(array, fp))
1688 return -EINVAL;
1689 }
1690
1691 return 0;
1692}
1693
Daniel Borkmann9facc332018-06-15 02:30:48 +02001694static void bpf_prog_select_func(struct bpf_prog *fp)
1695{
1696#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1697 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1698
1699 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1700#else
1701 fp->bpf_func = __bpf_prog_ret0_warn;
1702#endif
1703}
1704
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001705/**
Daniel Borkmann3324b582015-05-29 23:23:07 +02001706 * bpf_prog_select_runtime - select exec runtime for BPF program
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -07001707 * @fp: bpf_prog populated with internal BPF program
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02001708 * @err: pointer to error variable
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001709 *
Daniel Borkmann3324b582015-05-29 23:23:07 +02001710 * Try to JIT eBPF program, if JIT is not available, use interpreter.
1711 * The BPF program will be executed via BPF_PROG_RUN() macro.
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001712 */
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02001713struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001714{
Daniel Borkmann9facc332018-06-15 02:30:48 +02001715	/* In case of BPF to BPF calls, the verifier did all the prep
1716	 * work with regard to JITing, etc.
1717 */
1718 if (fp->bpf_func)
1719 goto finalize;
Martin KaFai Lau8007e402017-06-28 10:41:24 -07001720
Daniel Borkmann9facc332018-06-15 02:30:48 +02001721 bpf_prog_select_func(fp);
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001722
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02001723 /* eBPF JITs can rewrite the program in case constant
1724 * blinding is active. However, in case of error during
1725 * blinding, bpf_int_jit_compile() must always return a
1726	 * valid program, which in this case would simply not
1727	 * be JITed and would fall back to the interpreter.
1728 */
Jakub Kicinskiab3f0062017-11-03 13:56:17 -07001729 if (!bpf_prog_is_dev_bound(fp->aux)) {
Martin KaFai Lauc454a462018-12-07 16:42:25 -08001730 *err = bpf_prog_alloc_jited_linfo(fp);
1731 if (*err)
1732 return fp;
1733
Jakub Kicinskiab3f0062017-11-03 13:56:17 -07001734 fp = bpf_int_jit_compile(fp);
Alexei Starovoitov290af862018-01-09 10:04:29 -08001735 if (!fp->jited) {
Martin KaFai Lauc454a462018-12-07 16:42:25 -08001736 bpf_prog_free_jited_linfo(fp);
1737#ifdef CONFIG_BPF_JIT_ALWAYS_ON
Alexei Starovoitov290af862018-01-09 10:04:29 -08001738 *err = -ENOTSUPP;
1739 return fp;
Alexei Starovoitov290af862018-01-09 10:04:29 -08001740#endif
Martin KaFai Lauc454a462018-12-07 16:42:25 -08001741 } else {
1742 bpf_prog_free_unused_jited_linfo(fp);
1743 }
Jakub Kicinskiab3f0062017-11-03 13:56:17 -07001744 } else {
1745 *err = bpf_prog_offload_compile(fp);
1746 if (*err)
1747 return fp;
1748 }
Daniel Borkmann9facc332018-06-15 02:30:48 +02001749
1750finalize:
Daniel Borkmann60a3b222014-09-02 22:53:44 +02001751 bpf_prog_lock_ro(fp);
Alexei Starovoitov04fd61ab2015-05-19 16:59:03 -07001752
Daniel Borkmann3324b582015-05-29 23:23:07 +02001753 /* The tail call compatibility check can only be done at
1754	 * this late stage as we need to determine whether we deal
1755	 * with JITed or non-JITed program concatenations, and not
1756	 * all eBPF JITs might immediately support all features.
1757 */
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02001758 *err = bpf_check_tail_call(fp);
1759
1760 return fp;
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001761}
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -07001762EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
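/* Illustrative only: a typical loader path uses the above roughly as
 * follows (error unwinding of maps, ids, etc. elided):
 *
 *	int err = -EINVAL;
 *
 *	prog = bpf_prog_select_runtime(prog, &err);
 *	if (err < 0)
 *		goto free_prog;
 */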
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001763
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001764static unsigned int __bpf_prog_ret1(const void *ctx,
1765 const struct bpf_insn *insn)
1766{
1767 return 1;
1768}
1769
1770static struct bpf_prog_dummy {
1771 struct bpf_prog prog;
1772} dummy_bpf_prog = {
1773 .prog = {
1774 .bpf_func = __bpf_prog_ret1,
1775 },
1776};
1777
Alexei Starovoitov324bda9e62017-10-02 22:50:21 -07001778/* To avoid allocating an empty bpf_prog_array for cgroups that
1779 * don't have a bpf program attached, use the one global 'empty_prog_array'.
1780 * It will not be modified by the caller of bpf_prog_array_alloc()
1781 * (since the caller requested prog_cnt == 0), and
1782 * that pointer should still be 'freed' by bpf_prog_array_free().
1783 */
1784static struct {
1785 struct bpf_prog_array hdr;
1786 struct bpf_prog *null_prog;
1787} empty_prog_array = {
1788 .null_prog = NULL,
1789};
1790
Roman Gushchind29ab6e2018-07-13 12:41:10 -07001791struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
Alexei Starovoitov324bda9e62017-10-02 22:50:21 -07001792{
1793 if (prog_cnt)
1794 return kzalloc(sizeof(struct bpf_prog_array) +
Roman Gushchin394e40a2018-08-02 14:27:21 -07001795 sizeof(struct bpf_prog_array_item) *
1796 (prog_cnt + 1),
Alexei Starovoitov324bda9e62017-10-02 22:50:21 -07001797 flags);
1798
1799 return &empty_prog_array.hdr;
1800}
1801
1802void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
1803{
1804 if (!progs ||
1805 progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
1806 return;
1807 kfree_rcu(progs, rcu);
1808}
1809
Roman Gushchin394e40a2018-08-02 14:27:21 -07001810int bpf_prog_array_length(struct bpf_prog_array __rcu *array)
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001811{
Roman Gushchin394e40a2018-08-02 14:27:21 -07001812 struct bpf_prog_array_item *item;
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001813 u32 cnt = 0;
1814
1815 rcu_read_lock();
Roman Gushchin394e40a2018-08-02 14:27:21 -07001816 item = rcu_dereference(array)->items;
1817 for (; item->prog; item++)
1818 if (item->prog != &dummy_bpf_prog.prog)
Yonghong Songc8c088b2017-11-30 13:47:54 -08001819 cnt++;
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001820 rcu_read_unlock();
1821 return cnt;
1822}
1823
Roman Gushchin394e40a2018-08-02 14:27:21 -07001824
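/* Copy up to request_cnt program ids from 'array' into 'prog_ids', skipping
 * dummy entries. Returns true if entries remain beyond the last one copied,
 * which callers treat as "output truncated" (-ENOSPC).
 */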
1825static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
Yonghong Song3a38bb92018-04-10 09:37:32 -07001826 u32 *prog_ids,
1827 u32 request_cnt)
1828{
Roman Gushchin394e40a2018-08-02 14:27:21 -07001829 struct bpf_prog_array_item *item;
Yonghong Song3a38bb92018-04-10 09:37:32 -07001830 int i = 0;
1831
Yonghong Song965931e2018-08-14 11:01:12 -07001832 item = rcu_dereference_check(array, 1)->items;
Roman Gushchin394e40a2018-08-02 14:27:21 -07001833 for (; item->prog; item++) {
1834 if (item->prog == &dummy_bpf_prog.prog)
Yonghong Song3a38bb92018-04-10 09:37:32 -07001835 continue;
Roman Gushchin394e40a2018-08-02 14:27:21 -07001836 prog_ids[i] = item->prog->aux->id;
Yonghong Song3a38bb92018-04-10 09:37:32 -07001837 if (++i == request_cnt) {
Roman Gushchin394e40a2018-08-02 14:27:21 -07001838 item++;
Yonghong Song3a38bb92018-04-10 09:37:32 -07001839 break;
1840 }
1841 }
1842
Roman Gushchin394e40a2018-08-02 14:27:21 -07001843 return !!(item->prog);
Yonghong Song3a38bb92018-04-10 09:37:32 -07001844}
1845
Roman Gushchin394e40a2018-08-02 14:27:21 -07001846int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001847 __u32 __user *prog_ids, u32 cnt)
1848{
Alexei Starovoitov09112872018-02-02 15:14:05 -08001849 unsigned long err = 0;
Alexei Starovoitov09112872018-02-02 15:14:05 -08001850 bool nospc;
Yonghong Song3a38bb92018-04-10 09:37:32 -07001851 u32 *ids;
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001852
Alexei Starovoitov09112872018-02-02 15:14:05 -08001853 /* users of this function are doing:
1854 * cnt = bpf_prog_array_length();
1855 * if (cnt > 0)
1856 * bpf_prog_array_copy_to_user(..., cnt);
1857	 * so the kcalloc below doesn't need an extra cnt > 0 check, but
1858	 * bpf_prog_array_length() releases the rcu lock and the
1859	 * prog array could have been swapped with an empty or larger array,
1860	 * so always copy 'cnt' prog_ids to the user.
1861	 * In a rare race the user will see zero prog_ids.
1862 */
Daniel Borkmann9c481b92018-02-14 15:31:00 +01001863 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
Alexei Starovoitov09112872018-02-02 15:14:05 -08001864 if (!ids)
1865 return -ENOMEM;
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001866 rcu_read_lock();
Roman Gushchin394e40a2018-08-02 14:27:21 -07001867 nospc = bpf_prog_array_copy_core(array, ids, cnt);
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001868 rcu_read_unlock();
Alexei Starovoitov09112872018-02-02 15:14:05 -08001869 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1870 kfree(ids);
1871 if (err)
1872 return -EFAULT;
1873 if (nospc)
Alexei Starovoitov468e2f62017-10-02 22:50:22 -07001874 return -ENOSPC;
1875 return 0;
1876}
1877
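/* "Delete" old_prog from the array without resizing it: the matching slot
 * is overwritten with the dummy prog, so concurrent RCU readers keep seeing
 * a NULL-terminated array of callable programs.
 */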
Roman Gushchin394e40a2018-08-02 14:27:21 -07001878void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001879 struct bpf_prog *old_prog)
1880{
Roman Gushchin394e40a2018-08-02 14:27:21 -07001881 struct bpf_prog_array_item *item = array->items;
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001882
Roman Gushchin394e40a2018-08-02 14:27:21 -07001883 for (; item->prog; item++)
1884 if (item->prog == old_prog) {
1885 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001886 break;
1887 }
1888}
1889
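/* Build a new prog array from old_array with exclude_prog dropped and
 * include_prog appended: returns -ENOENT if exclude_prog was requested but
 * not found, -EEXIST if include_prog is already present, and leaves
 * *new_array NULL when the result would be empty.
 */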
1890int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
1891 struct bpf_prog *exclude_prog,
1892 struct bpf_prog *include_prog,
1893 struct bpf_prog_array **new_array)
1894{
1895 int new_prog_cnt, carry_prog_cnt = 0;
Roman Gushchin394e40a2018-08-02 14:27:21 -07001896 struct bpf_prog_array_item *existing;
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001897 struct bpf_prog_array *array;
Sean Young170a7e32018-05-27 12:24:08 +01001898 bool found_exclude = false;
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001899 int new_prog_idx = 0;
1900
1901 /* Figure out how many existing progs we need to carry over to
1902 * the new array.
1903 */
1904 if (old_array) {
Roman Gushchin394e40a2018-08-02 14:27:21 -07001905 existing = old_array->items;
1906 for (; existing->prog; existing++) {
1907 if (existing->prog == exclude_prog) {
Sean Young170a7e32018-05-27 12:24:08 +01001908 found_exclude = true;
1909 continue;
1910 }
Roman Gushchin394e40a2018-08-02 14:27:21 -07001911 if (existing->prog != &dummy_bpf_prog.prog)
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001912 carry_prog_cnt++;
Roman Gushchin394e40a2018-08-02 14:27:21 -07001913 if (existing->prog == include_prog)
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001914 return -EEXIST;
1915 }
1916 }
1917
Sean Young170a7e32018-05-27 12:24:08 +01001918 if (exclude_prog && !found_exclude)
1919 return -ENOENT;
1920
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001921 /* How many progs (not NULL) will be in the new array? */
1922 new_prog_cnt = carry_prog_cnt;
1923 if (include_prog)
1924 new_prog_cnt += 1;
1925
1926 /* Do we have any prog (not NULL) in the new array? */
1927 if (!new_prog_cnt) {
1928 *new_array = NULL;
1929 return 0;
1930 }
1931
1932 /* +1 as the end of prog_array is marked with NULL */
1933 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
1934 if (!array)
1935 return -ENOMEM;
1936
1937 /* Fill in the new prog array */
1938 if (carry_prog_cnt) {
Roman Gushchin394e40a2018-08-02 14:27:21 -07001939 existing = old_array->items;
1940 for (; existing->prog; existing++)
1941 if (existing->prog != exclude_prog &&
1942 existing->prog != &dummy_bpf_prog.prog) {
1943 array->items[new_prog_idx++].prog =
1944 existing->prog;
1945 }
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001946 }
1947 if (include_prog)
Roman Gushchin394e40a2018-08-02 14:27:21 -07001948 array->items[new_prog_idx++].prog = include_prog;
1949 array->items[new_prog_idx].prog = NULL;
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001950 *new_array = array;
1951 return 0;
1952}
1953
Yonghong Songf371b302017-12-11 11:39:02 -08001954int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
Yonghong Song3a38bb92018-04-10 09:37:32 -07001955 u32 *prog_ids, u32 request_cnt,
1956 u32 *prog_cnt)
Yonghong Songf371b302017-12-11 11:39:02 -08001957{
1958 u32 cnt = 0;
1959
1960 if (array)
1961 cnt = bpf_prog_array_length(array);
1962
Yonghong Song3a38bb92018-04-10 09:37:32 -07001963 *prog_cnt = cnt;
Yonghong Songf371b302017-12-11 11:39:02 -08001964
1965 /* return early if user requested only program count or nothing to copy */
1966 if (!request_cnt || !cnt)
1967 return 0;
1968
Yonghong Song3a38bb92018-04-10 09:37:32 -07001969 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
Roman Gushchin394e40a2018-08-02 14:27:21 -07001970 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
Yonghong Song3a38bb92018-04-10 09:37:32 -07001971 : 0;
Yonghong Songf371b302017-12-11 11:39:02 -08001972}
1973
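/* Actual teardown, run from a workqueue (see bpf_prog_free() below): for
 * programs with bpf-to-bpf calls each JITed subprog image is freed first,
 * otherwise the single program image is freed directly.
 */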
Daniel Borkmann60a3b222014-09-02 22:53:44 +02001974static void bpf_prog_free_deferred(struct work_struct *work)
1975{
Alexei Starovoitov09756af2014-09-26 00:17:00 -07001976 struct bpf_prog_aux *aux;
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -08001977 int i;
Daniel Borkmann60a3b222014-09-02 22:53:44 +02001978
Alexei Starovoitov09756af2014-09-26 00:17:00 -07001979 aux = container_of(work, struct bpf_prog_aux, work);
Jakub Kicinskiab3f0062017-11-03 13:56:17 -07001980 if (bpf_prog_is_dev_bound(aux))
1981 bpf_prog_offload_destroy(aux->prog);
Yonghong Songc195651e2018-04-28 22:28:08 -07001982#ifdef CONFIG_PERF_EVENTS
1983 if (aux->prog->has_callchain_buf)
1984 put_callchain_buffers();
1985#endif
Alexei Starovoitov1c2a0882017-12-14 17:55:15 -08001986 for (i = 0; i < aux->func_cnt; i++)
1987 bpf_jit_free(aux->func[i]);
1988 if (aux->func_cnt) {
1989 kfree(aux->func);
1990 bpf_prog_unlock_free(aux->prog);
1991 } else {
1992 bpf_jit_free(aux->prog);
1993 }
Daniel Borkmann60a3b222014-09-02 22:53:44 +02001994}
1995
1996/* Free internal BPF program */
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -07001997void bpf_prog_free(struct bpf_prog *fp)
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07001998{
Alexei Starovoitov09756af2014-09-26 00:17:00 -07001999 struct bpf_prog_aux *aux = fp->aux;
Daniel Borkmann60a3b222014-09-02 22:53:44 +02002000
Alexei Starovoitov09756af2014-09-26 00:17:00 -07002001 INIT_WORK(&aux->work, bpf_prog_free_deferred);
Alexei Starovoitov09756af2014-09-26 00:17:00 -07002002 schedule_work(&aux->work);
Alexei Starovoitovf5bffec2014-07-22 23:01:58 -07002003}
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -07002004EXPORT_SYMBOL_GPL(bpf_prog_free);
Alexei Starovoitovf89b7752014-10-23 18:41:08 -07002005
Daniel Borkmann3ad00402015-10-08 01:20:39 +02002006/* RNG for unprivileged user space with separated state from prandom_u32(). */
2007static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2008
2009void bpf_user_rnd_init_once(void)
2010{
2011 prandom_init_once(&bpf_user_rnd_state);
2012}
2013
Daniel Borkmannf3694e02016-09-09 02:45:31 +02002014BPF_CALL_0(bpf_user_rnd_u32)
Daniel Borkmann3ad00402015-10-08 01:20:39 +02002015{
2016 /* Should someone ever have the rather unwise idea to use some
2017 * of the registers passed into this function, then note that
2018 * this function is called from native eBPF and classic-to-eBPF
2019 * transformations. Register assignments from both sides are
2020	 * different, e.g. classic always sets fn(ctx, A, X) here.
2021 */
2022 struct rnd_state *state;
2023 u32 res;
2024
2025 state = &get_cpu_var(bpf_user_rnd_state);
2026 res = prandom_u32_state(state);
Shaohua Lib761fe22016-09-27 08:42:41 -07002027 put_cpu_var(bpf_user_rnd_state);
Daniel Borkmann3ad00402015-10-08 01:20:39 +02002028
2029 return res;
2030}
2031
Daniel Borkmann3ba67da2015-03-05 23:27:51 +01002032/* Weak definitions of helper functions in case we don't have bpf syscall. */
2033const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2034const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2035const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
Mauricio Vasquez Bf1a2e442018-10-18 15:16:25 +02002036const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2037const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2038const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
Daniel Borkmann3ba67da2015-03-05 23:27:51 +01002039
Daniel Borkmann03e69b52015-03-14 02:27:16 +01002040const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
Daniel Borkmannc04167c2015-03-14 02:27:17 +01002041const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
Daniel Borkmann2d0e30c2016-10-21 12:46:33 +02002042const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
Daniel Borkmann17ca8cb2015-05-29 23:23:06 +02002043const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
Daniel Borkmannbd570ff2016-04-18 21:01:24 +02002044
Alexei Starovoitovffeedaf2015-06-12 19:39:12 -07002045const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2046const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2047const struct bpf_func_proto bpf_get_current_comm_proto __weak;
Yonghong Songbf6fa2c82018-06-03 15:59:41 -07002048const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
Roman Gushchincd339432018-08-02 14:27:24 -07002049const struct bpf_func_proto bpf_get_local_storage_proto __weak;
Daniel Borkmannbd570ff2016-04-18 21:01:24 +02002050
Alexei Starovoitov0756ea32015-06-12 19:39:13 -07002051const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2052{
2053 return NULL;
2054}
Daniel Borkmann03e69b52015-03-14 02:27:16 +01002055
Daniel Borkmann555c8a82016-07-14 18:08:05 +02002056u64 __weak
2057bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2058 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
Daniel Borkmannbd570ff2016-04-18 21:01:24 +02002059{
Daniel Borkmann555c8a82016-07-14 18:08:05 +02002060 return -ENOTSUPP;
Daniel Borkmannbd570ff2016-04-18 21:01:24 +02002061}
Jakub Kicinski6cb5fb32018-05-03 18:37:10 -07002062EXPORT_SYMBOL_GPL(bpf_event_output);
Daniel Borkmannbd570ff2016-04-18 21:01:24 +02002063
Daniel Borkmann3324b582015-05-29 23:23:07 +02002064/* Always built-in helper functions. */
2065const struct bpf_func_proto bpf_tail_call_proto = {
2066 .func = NULL,
2067 .gpl_only = false,
2068 .ret_type = RET_VOID,
2069 .arg1_type = ARG_PTR_TO_CTX,
2070 .arg2_type = ARG_CONST_MAP_PTR,
2071 .arg3_type = ARG_ANYTHING,
2072};
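/* .func is intentionally NULL here: tail calls are not dispatched through
 * an ordinary helper call, the interpreter and the JITs handle them inline.
 */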
2073
Daniel Borkmann93831912017-02-16 22:24:49 +01002074/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2075 * It is encouraged to implement bpf_int_jit_compile() instead, so that
2076 * eBPF and implicitly also cBPF can get JITed!
2077 */
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02002078struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
Daniel Borkmann3324b582015-05-29 23:23:07 +02002079{
Daniel Borkmannd1c55ab2016-05-13 19:08:31 +02002080 return prog;
Daniel Borkmann3324b582015-05-29 23:23:07 +02002081}
2082
Daniel Borkmann93831912017-02-16 22:24:49 +01002083/* Stub for JITs that support eBPF. All cBPF code gets transformed into
2084 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2085 */
2086void __weak bpf_jit_compile(struct bpf_prog *prog)
2087{
2088}
2089
Martin KaFai Lau17bedab2016-12-07 15:53:11 -08002090bool __weak bpf_helper_changes_pkt_data(void *func)
Alexei Starovoitov969bf052016-05-05 19:49:10 -07002091{
2092 return false;
2093}
2094
Alexei Starovoitovf89b7752014-10-23 18:41:08 -07002095/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2096 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2097 */
2098int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2099 int len)
2100{
2101 return -EFAULT;
2102}
Daniel Borkmanna67edbf2017-01-25 02:28:18 +01002103
2104/* All definitions of tracepoints related to BPF. */
2105#define CREATE_TRACE_POINTS
2106#include <linux/bpf_trace.h>
2107
2108EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);