/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF (1ULL << 31)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ INT_MAX
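
/* Illustrative sketch, not part of the original header: why the two caps
 * above are enough. With umax_value <= BPF_MAX_VAR_OFF (2^31) and the fixed
 * offset and access size each below BPF_MAX_VAR_SIZ (2^31), the worst-case
 * end of an access computed below stays under 3 * 2^31, far from U64_MAX,
 * so the u64 arithmetic cannot wrap. The helper name is hypothetical.
 */
static inline u64 bpf_example_access_end(u64 umax_value, u32 off, u32 size)
{
        /* assumes umax_value was already checked against BPF_MAX_VAR_OFF */
        return umax_value + off + size;
}
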
struct bpf_reg_state {
        enum bpf_reg_type type;
        union {
                /* valid when type == PTR_TO_PACKET */
                u16 range;

                /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
                 *   PTR_TO_MAP_VALUE_OR_NULL
                 */
                struct bpf_map *map_ptr;
        };
        /* Fixed part of pointer offset, pointer types only */
        s32 off;
        /* For PTR_TO_PACKET, used to find other pointers with the same variable
         * offset, so they can share range knowledge.
         * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
         * came from, when one is tested for != NULL.
         */
        u32 id;
        /* These five fields must be last. See states_equal() */
        /* For scalar types (SCALAR_VALUE), this represents our knowledge of
         * the actual value.
         * For pointer types, this represents the variable part of the offset
         * from the pointed-to object, and is shared with all bpf_reg_states
         * with the same id as us.
         */
        struct tnum var_off;
        /* Used to determine if any memory access using this register will
         * result in a bad access.
         * These refer to the same value as var_off, not necessarily the actual
         * contents of the register.
         */
        s64 smin_value; /* minimum possible (s64)value */
        s64 smax_value; /* maximum possible (s64)value */
        u64 umin_value; /* minimum possible (u64)value */
        u64 umax_value; /* maximum possible (u64)value */
};
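
/* Illustrative sketch, not part of the original header: how the tracking
 * fields above combine. A scalar register holds one known constant exactly
 * when its tnum has no unknown bits (mask == 0) and the signed and unsigned
 * bounds have collapsed onto that value. Assumes the value/mask layout of
 * struct tnum from linux/tnum.h; the helper name is hypothetical.
 */
static inline bool bpf_example_reg_is_const(const struct bpf_reg_state *reg)
{
        return reg->type == SCALAR_VALUE &&
               reg->var_off.mask == 0 &&
               reg->umin_value == reg->umax_value &&
               reg->smin_value == reg->smax_value &&
               reg->umax_value == reg->var_off.value;
}
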

enum bpf_stack_slot_type {
        STACK_INVALID, /* nothing was stored in this stack slot */
        STACK_SPILL,   /* register spilled into stack */
        STACK_MISC     /* BPF program wrote some data into this slot */
};

#define BPF_REG_SIZE 8  /* size of eBPF register in bytes */

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_verifier_state {
        struct bpf_reg_state regs[MAX_BPF_REG];
        u8 stack_slot_type[MAX_BPF_STACK];
        struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
};
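
/* Illustrative sketch, not part of the original header: how the two stack
 * arrays above line up. stack_slot_type[] tracks the stack byte by byte,
 * while spilled_regs[] keeps one saved register state per BPF_REG_SIZE-sized
 * slot. The hypothetical helper below treats a slot as a spill only if all
 * of its bytes are marked STACK_SPILL, and returns the matching saved state.
 */
static inline struct bpf_reg_state *
bpf_example_spilled_reg(struct bpf_verifier_state *state, int slot)
{
        int i;

        for (i = 0; i < BPF_REG_SIZE; i++)
                if (state->stack_slot_type[slot * BPF_REG_SIZE + i] != STACK_SPILL)
                        return NULL;
        return &state->spilled_regs[slot];
}
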

/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
        struct bpf_verifier_state state;
        struct bpf_verifier_state_list *next;
};
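
/* Illustrative sketch, not part of the original header: the pruning idea
 * behind this list. When the verifier revisits an instruction, it can walk
 * the states already explored there and stop early if the current state is
 * equivalent to one it has verified before. The comparison is the
 * states_equal() referenced in the comment on bpf_reg_state; it lives in the
 * verifier itself, so this hypothetical helper takes it as a callback.
 */
static inline bool
bpf_example_state_seen(struct bpf_verifier_state_list *head,
                       struct bpf_verifier_state *cur,
                       bool (*equal)(struct bpf_verifier_state *,
                                     struct bpf_verifier_state *))
{
        struct bpf_verifier_state_list *sl;

        for (sl = head; sl; sl = sl->next)
                if (equal(&sl->state, cur))
                        return true;
        return false;
}
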

struct bpf_insn_aux_data {
        union {
                enum bpf_reg_type ptr_type;     /* pointer type for load/store insns */
                struct bpf_map *map_ptr;        /* pointer for call insn into lookup_elem */
        };
        int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
        int converted_op_size; /* the valid value width after perceived conversion */
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

struct bpf_verifier_env;
struct bpf_ext_analyzer_ops {
        int (*insn_hook)(struct bpf_verifier_env *env,
                         int insn_idx, int prev_insn_idx);
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
        struct bpf_prog *prog;          /* eBPF program being verified */
        struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
        int stack_size;                 /* number of states to be processed */
        bool strict_alignment;          /* perform strict pointer alignment checks */
        struct bpf_verifier_state cur_state; /* current verifier state */
        struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
        const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
        void *analyzer_priv; /* pointer to external analyzer's private data */
        struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
        u32 used_map_cnt;               /* number of used maps */
        u32 id_gen;                     /* used to generate unique reg IDs */
        bool allow_ptr_leaks;
        bool seen_direct_write;
        bool varlen_map_value_access;
        struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
};
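
/* Illustrative sketch, not part of the original header: insn_aux_data is an
 * array with one entry per instruction of env->prog, indexed by instruction
 * index. The hypothetical helper below shows the shape of such an access;
 * the real bookkeeping is done inside the verifier.
 */
static inline void
bpf_example_record_ptr_type(struct bpf_verifier_env *env, int insn_idx,
                            enum bpf_reg_type type)
{
        env->insn_aux_data[insn_idx].ptr_type = type;
}
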

int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
                 void *priv);
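
/* Illustrative sketch, not part of the original header, guarded out because
 * it is only an example: the shape of an external analyzer built on
 * bpf_analyzer(). The callback and its private data are hypothetical; a
 * real user (e.g. a driver offloading BPF) would inspect env->cur_state at
 * each instruction rather than just count instructions.
 */
#if 0
struct my_analyzer_priv {
        unsigned int insns_seen;
};

static int my_insn_hook(struct bpf_verifier_env *env,
                        int insn_idx, int prev_insn_idx)
{
        struct my_analyzer_priv *priv = env->analyzer_priv;

        priv->insns_seen++;
        return 0;       /* a non-zero return aborts verification */
}

static const struct bpf_ext_analyzer_ops my_analyzer_ops = {
        .insn_hook = my_insn_hook,
};

static int my_analyze(struct bpf_prog *prog)
{
        struct my_analyzer_priv priv = { 0 };

        return bpf_analyzer(prog, &my_analyzer_ops, &priv);
}
#endif
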

#endif /* _LINUX_BPF_VERIFIER_H */