/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Facebook */

#include <uapi/linux/btf.h>
#include <uapi/linux/types.h>
#include <linux/seq_file.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/sort.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>

/* BTF (BPF Type Format) is the metadata format which describes
 * the data types of BPF programs/maps. Hence, it basically focuses
 * on the C programming language, which modern BPF is primarily
 * using.
 *
 * ELF Section:
 * ~~~~~~~~~~~
 * The BTF data is stored under the ".BTF" ELF section.
 *
 * struct btf_type:
 * ~~~~~~~~~~~~~~~
 * Each 'struct btf_type' object describes a C data type.
 * Depending on the type it is describing, a 'struct btf_type'
 * object may be followed by more data.  E.g. to describe an
 * array, 'struct btf_type' is followed by 'struct btf_array'.
 *
 * 'struct btf_type' and any extra data following it are
 * 4 bytes aligned.
 *
 * Type section:
 * ~~~~~~~~~~~~~
 * The BTF type section contains a list of 'struct btf_type' objects.
 * Each one describes a C type.  Recall from the above section
 * that a 'struct btf_type' object could be immediately followed by extra
 * data in order to describe some particular C types.
 *
 * type_id:
 * ~~~~~~~
 * Each btf_type object is identified by a type_id.  The type_id
 * is implicitly implied by the location of the btf_type object in
 * the BTF type section.  The first one has type_id 1.  The second
 * one has type_id 2...etc.  Hence, an earlier btf_type has
 * a smaller type_id.
 *
 * A btf_type object may refer to another btf_type object by using
 * type_id (i.e. the "type" in the "struct btf_type").
 *
 * NOTE that we cannot assume any reference-order.
 * A btf_type object can refer to an earlier btf_type object
 * but it can also refer to a later btf_type object.
 *
 * For example, to describe "const void *", a btf_type
 * object describing "const" may refer to another btf_type
 * object describing "void *".  This type-reference is done
 * by specifying type_id:
 *
 * [1] CONST (anon) type_id=2
 * [2] PTR (anon) type_id=0
 *
 * The above is the btf_verifier debug log:
 *   - Each line starting with "[?]" is a btf_type object
 *   - [?] is the type_id of the btf_type object.
 *   - CONST/PTR is the BTF_KIND_XXX
 *   - "(anon)" is the name of the type.  It just
 *     happens that CONST and PTR have no name.
 *   - type_id=XXX is the 'u32 type' in btf_type
 *
 * NOTE: "void" has type_id 0
 *
 * String section:
 * ~~~~~~~~~~~~~~
 * The BTF string section contains the names used by the type section.
 * Each string is referred to by an "offset" from the beginning of the
 * string section.
 *
 * Each string is '\0' terminated.
 *
 * The first character in the string section must be '\0',
 * which is used to mean 'anonymous'.  Some btf_types may not
 * have a name.
 */
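
/* Illustrative example (made-up type ids, not taken from a real BTF blob):
 * the C type "const int *" could be encoded with three btf_type objects
 * and one string.  A hypothetical btf_verifier debug log for it would be:
 *
 * [1] INT int size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
 * [2] CONST (anon) type_id=1
 * [3] PTR (anon) type_id=2
 *
 * The string section would then be "\0int\0": offset 0 is the mandatory
 * '\0' (anonymous), and "int" starts at offset 1, which is what the
 * name_off of type [1] points to.
 */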

/* BTF verification:
 *
 * To verify BTF data, two passes are needed.
 *
 * Pass #1
 * ~~~~~~~
 * The first pass is to collect all btf_type objects to
 * an array: "btf->types".
 *
 * Depending on the C type that a btf_type is describing,
 * a btf_type may be followed by extra data.  We don't know
 * how many btf_types there are, and more importantly we don't
 * know where each btf_type is located in the type section.
 *
 * Without knowing the location of each type_id, most verifications
 * cannot be done.  e.g. an earlier btf_type may refer to a later
 * btf_type (recall the "const void *" above), so we cannot
 * check this type-reference in the first pass.
 *
 * The first pass still does some verifications (e.g.
 * checking that the name is a valid offset into the string section).
 *
 * Pass #2
 * ~~~~~~~
 * The main focus is to resolve a btf_type that is referring
 * to another type.
 *
 * We have to ensure the type being referred to:
 * 1) does exist in the BTF (i.e. in btf->types[])
 * 2) does not cause a loop:
 *	struct A {
 *		struct B b;
 *	};
 *
 *	struct B {
 *		struct A a;
 *	};
 *
 * btf_type_needs_resolve() decides if a btf_type needs
 * to be resolved.
 *
 * The needs_resolve type implements the "resolve()" ops which
 * essentially does a DFS and detects backedges.
 *
 * During resolve (or DFS), different C types have different
 * "RESOLVED" conditions.
 *
 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
 * members because a member is always referring to another
 * type.  A struct's member can be treated as "RESOLVED" if
 * it is referring to a BTF_KIND_PTR.  Otherwise, the
 * following valid C struct would be rejected:
 *
 *	struct A {
 *		int m;
 *		struct A *a;
 *	};
 *
 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
 * it is referring to another BTF_KIND_PTR.  Otherwise, we cannot
 * detect a pointer loop, e.g.:
 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
 *                        ^                                          |
 *                        +------------------------------------------+
 *
 */

#define BITS_PER_U64 (sizeof(u64) * BITS_PER_BYTE)
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
#define BITS_ROUNDUP_BYTES(bits) \
	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
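
/* A few worked examples of the bit<->byte helpers above (illustrative
 * values only):
 *
 *   BITS_PER_BYTE_MASKED(17)  == 1   (17 bits == 2 bytes + 1 bit)
 *   BITS_ROUNDDOWN_BYTES(17)  == 2   (whole bytes covered by 17 bits)
 *   BITS_ROUNDUP_BYTES(17)    == 3   (bytes needed to hold 17 bits)
 *   BITS_ROUNDUP_BYTES(16)    == 2   (already byte aligned)
 */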

#define BTF_INFO_MASK 0x0f00ffff
#define BTF_INT_MASK 0x0fffffff
#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)

/* 16MB for 64k structs (each with 16 members) and
 * a few MB of space for the string section.
 * The hard limit is S32_MAX.
 */
#define BTF_MAX_SIZE (16 * 1024 * 1024)

#define for_each_member(i, struct_type, member)			\
	for (i = 0, member = btf_type_member(struct_type);	\
	     i < btf_type_vlen(struct_type);			\
	     i++, member++)

#define for_each_member_from(i, from, struct_type, member)		\
	for (i = from, member = btf_type_member(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);				\
	     i++, member++)
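
/* Typical (illustrative) usage of the iterators above, e.g. inside a
 * check_meta or resolve callback:
 *
 *	const struct btf_member *member;
 *	u16 i;
 *
 *	for_each_member(i, struct_type, member) {
 *		... validate member->name_off, member->type,
 *		    member->offset ...
 *	}
 *
 * for_each_member_from() restarts the same walk at member index "from",
 * which is how btf_struct_resolve() continues after pushing a member's
 * type onto the resolve stack.
 */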

static DEFINE_IDR(btf_idr);
static DEFINE_SPINLOCK(btf_idr_lock);

struct btf {
	void *data;
	struct btf_type **types;
	u32 *resolved_ids;
	u32 *resolved_sizes;
	const char *strings;
	void *nohdr_data;
	struct btf_header hdr;
	u32 nr_types;
	u32 types_size;
	u32 data_size;
	refcount_t refcnt;
	u32 id;
	struct rcu_head rcu;
};

enum verifier_phase {
	CHECK_META,
	CHECK_TYPE,
};

struct resolve_vertex {
	const struct btf_type *t;
	u32 type_id;
	u16 next_member;
};

enum visit_state {
	NOT_VISITED,
	VISITED,
	RESOLVED,
};

enum resolve_mode {
	RESOLVE_TBD,	/* To Be Determined */
	RESOLVE_PTR,	/* Resolving for Pointer */
	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
					 * or array
					 */
};

#define MAX_RESOLVE_DEPTH 32

struct btf_sec_info {
	u32 off;
	u32 len;
};

struct btf_verifier_env {
	struct btf *btf;
	u8 *visit_states;
	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
	struct bpf_verifier_log log;
	u32 log_type_id;
	u32 top_stack;
	enum verifier_phase phase;
	enum resolve_mode resolve_mode;
};

static const char * const btf_kind_str[NR_BTF_KINDS] = {
	[BTF_KIND_UNKN]		= "UNKNOWN",
	[BTF_KIND_INT]		= "INT",
	[BTF_KIND_PTR]		= "PTR",
	[BTF_KIND_ARRAY]	= "ARRAY",
	[BTF_KIND_STRUCT]	= "STRUCT",
	[BTF_KIND_UNION]	= "UNION",
	[BTF_KIND_ENUM]		= "ENUM",
	[BTF_KIND_FWD]		= "FWD",
	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
	[BTF_KIND_VOLATILE]	= "VOLATILE",
	[BTF_KIND_CONST]	= "CONST",
	[BTF_KIND_RESTRICT]	= "RESTRICT",
};

struct btf_kind_operations {
	s32 (*check_meta)(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left);
	int (*resolve)(struct btf_verifier_env *env,
		       const struct resolve_vertex *v);
	int (*check_member)(struct btf_verifier_env *env,
			    const struct btf_type *struct_type,
			    const struct btf_member *member,
			    const struct btf_type *member_type);
	void (*log_details)(struct btf_verifier_env *env,
			    const struct btf_type *t);
	void (*seq_show)(const struct btf *btf, const struct btf_type *t,
			 u32 type_id, void *data, u8 bits_offsets,
			 struct seq_file *m);
};

static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
static struct btf_type btf_void;

static bool btf_type_is_modifier(const struct btf_type *t)
{
	/* Some of them are not strictly C modifiers,
	 * but they are grouped into the same bucket
	 * as far as BTF is concerned:
	 * A type (t) that refers to another
	 * type through t->type AND its size cannot
	 * be determined without following the t->type.
	 *
	 * ptr does not fall into this bucket
	 * because its size is always sizeof(void *).
	 */
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
		return true;
	}

	return false;
}

static bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void;
}

static bool btf_type_is_fwd(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
}

static bool btf_type_nosize(const struct btf_type *t)
{
	return btf_type_is_void(t) || btf_type_is_fwd(t);
}

static bool btf_type_nosize_or_null(const struct btf_type *t)
{
	return !t || btf_type_nosize(t);
}

/* union is only a special case of struct:
 * all its offsetof(member) == 0
 */
static bool btf_type_is_struct(const struct btf_type *t)
{
	u8 kind = BTF_INFO_KIND(t->info);

	return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
}

static bool btf_type_is_array(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
}

static bool btf_type_is_ptr(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
}

static bool btf_type_is_int(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
}

/* What types need to be resolved?
 *
 * btf_type_is_modifier() is an obvious one.
 *
 * btf_type_is_struct() because its members refer to
 * another type (through member->type).
 *
 * btf_type_is_array() because its element (array->type)
 * refers to another type.  An array can be thought of as a
 * special case of struct where the same member-type is
 * repeated array->nelems times.
 */
static bool btf_type_needs_resolve(const struct btf_type *t)
{
	return btf_type_is_modifier(t) ||
	       btf_type_is_ptr(t) ||
	       btf_type_is_struct(t) ||
	       btf_type_is_array(t);
}

/* t->size can be used */
static bool btf_type_has_size(const struct btf_type *t)
{
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_INT:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_ENUM:
		return true;
	}

	return false;
}

static const char *btf_int_encoding_str(u8 encoding)
{
	if (encoding == 0)
		return "(none)";
	else if (encoding == BTF_INT_SIGNED)
		return "SIGNED";
	else if (encoding == BTF_INT_CHAR)
		return "CHAR";
	else if (encoding == BTF_INT_BOOL)
		return "BOOL";
	else
		return "UNKN";
}

static u16 btf_type_vlen(const struct btf_type *t)
{
	return BTF_INFO_VLEN(t->info);
}

static u32 btf_type_int(const struct btf_type *t)
{
	return *(u32 *)(t + 1);
}

static const struct btf_array *btf_type_array(const struct btf_type *t)
{
	return (const struct btf_array *)(t + 1);
}

static const struct btf_member *btf_type_member(const struct btf_type *t)
{
	return (const struct btf_member *)(t + 1);
}

static const struct btf_enum *btf_type_enum(const struct btf_type *t)
{
	return (const struct btf_enum *)(t + 1);
}
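
/* The accessors above rely on the BTF layout rule described at the top
 * of this file: the kind-specific data immediately follows the
 * 4-byte-aligned 'struct btf_type'.  For example (illustrative summary,
 * see uapi/linux/btf.h for the authoritative definitions):
 *
 *   BTF_KIND_INT:    struct btf_type + one u32 (encoding/offset/bits)
 *   BTF_KIND_ARRAY:  struct btf_type + struct btf_array
 *   BTF_KIND_STRUCT: struct btf_type + vlen * struct btf_member
 *   BTF_KIND_ENUM:   struct btf_type + vlen * struct btf_enum
 */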

static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
{
	return kind_ops[BTF_INFO_KIND(t->info)];
}

static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
{
	return BTF_STR_OFFSET_VALID(offset) &&
		offset < btf->hdr.str_len;
}

static const char *btf_name_by_offset(const struct btf *btf, u32 offset)
{
	if (!offset)
		return "(anon)";
	else if (offset < btf->hdr.str_len)
		return &btf->strings[offset];
	else
		return "(invalid-name-offset)";
}

static const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
{
	if (type_id > btf->nr_types)
		return NULL;

	return btf->types[type_id];
}

/*
 * Regular int is not a bit field and it must be either
 * u8/u16/u32/u64.
 */
static bool btf_type_int_is_regular(const struct btf_type *t)
{
	u8 nr_bits, nr_bytes;
	u32 int_data;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
	if (BITS_PER_BYTE_MASKED(nr_bits) ||
	    BTF_INT_OFFSET(int_data) ||
	    (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
	     nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64))) {
		return false;
	}

	return true;
}
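
/* Illustrative examples for btf_type_int_is_regular():
 *
 *   a plain "u32" (nr_bits=32, offset=0)  -> regular (4 bytes)
 *   a bit field encoded with nr_bits=5    -> not regular
 *   a 24-bit int (nr_bits=24, 3 bytes)    -> not regular (odd size)
 *
 * Array index and element int types must be regular
 * (see btf_array_resolve()).
 */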

__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
					      const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

__printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
					    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

__printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
						   const struct btf_type *t,
						   bool log_details,
						   const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	u8 kind = BTF_INFO_KIND(t->info);
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	__btf_verifier_log(log, "[%u] %s %s%s",
			   env->log_type_id,
			   btf_kind_str[kind],
			   btf_name_by_offset(btf, t->name_off),
			   log_details ? " " : "");

	if (log_details)
		btf_type_ops(t)->log_details(env, t);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

#define btf_verifier_log_type(env, t, ...) \
	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
#define btf_verifier_log_basic(env, t, ...) \
	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)

__printf(4, 5)
static void btf_verifier_log_member(struct btf_verifier_env *env,
				    const struct btf_type *struct_type,
				    const struct btf_member *member,
				    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	/* The CHECK_META phase already did a btf dump.
	 *
	 * If member is logged again, it must hit an error in
	 * parsing this member.  It is useful to print out which
	 * struct this member belongs to.
	 */
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, struct_type, NULL);

	__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
			   btf_name_by_offset(btf, member->name_off),
			   member->type, member->offset);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

static void btf_verifier_log_hdr(struct btf_verifier_env *env,
				 u32 btf_data_size)
{
	struct bpf_verifier_log *log = &env->log;
	const struct btf *btf = env->btf;
	const struct btf_header *hdr;

	if (!bpf_verifier_log_needed(log))
		return;

	hdr = &btf->hdr;
	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
	__btf_verifier_log(log, "version: %u\n", hdr->version);
	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
}

static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
{
	struct btf *btf = env->btf;

	/* < 2 because +1 for btf_void which is always in btf->types[0].
	 * btf_void is not accounted in btf->nr_types because btf_void
	 * does not come from the BTF file.
	 */
	if (btf->types_size - btf->nr_types < 2) {
		/* Expand 'types' array */

		struct btf_type **new_types;
		u32 expand_by, new_size;

		if (btf->types_size == BTF_MAX_TYPE) {
			btf_verifier_log(env, "Exceeded max num of types");
			return -E2BIG;
		}

		expand_by = max_t(u32, btf->types_size >> 2, 16);
		new_size = min_t(u32, BTF_MAX_TYPE,
				 btf->types_size + expand_by);

		new_types = kvcalloc(new_size, sizeof(*new_types),
				     GFP_KERNEL | __GFP_NOWARN);
		if (!new_types)
			return -ENOMEM;

		if (btf->nr_types == 0)
			new_types[0] = &btf_void;
		else
			memcpy(new_types, btf->types,
			       sizeof(*btf->types) * (btf->nr_types + 1));

		kvfree(btf->types);
		btf->types = new_types;
		btf->types_size = new_size;
	}

	btf->types[++(btf->nr_types)] = t;

	return 0;
}

static int btf_alloc_id(struct btf *btf)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&btf_idr_lock);
	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		btf->id = id;
	spin_unlock_bh(&btf_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void btf_free_id(struct btf *btf)
{
	unsigned long flags;

	/*
	 * In map-in-map, calling map_delete_elem() on the outer
	 * map will call bpf_map_put on the inner map.
	 * It will then eventually call btf_free_id()
	 * on the inner map.  Some of the map_delete_elem()
	 * implementations may have irqs disabled, so
	 * we need to use the _irqsave() version instead
	 * of the _bh() version.
	 */
	spin_lock_irqsave(&btf_idr_lock, flags);
	idr_remove(&btf_idr, btf->id);
	spin_unlock_irqrestore(&btf_idr_lock, flags);
}

static void btf_free(struct btf *btf)
{
	kvfree(btf->types);
	kvfree(btf->resolved_sizes);
	kvfree(btf->resolved_ids);
	kvfree(btf->data);
	kfree(btf);
}

static void btf_free_rcu(struct rcu_head *rcu)
{
	struct btf *btf = container_of(rcu, struct btf, rcu);

	btf_free(btf);
}

void btf_put(struct btf *btf)
{
	if (btf && refcount_dec_and_test(&btf->refcnt)) {
		btf_free_id(btf);
		call_rcu(&btf->rcu, btf_free_rcu);
	}
}

static int env_resolve_init(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	u32 nr_types = btf->nr_types;
	u32 *resolved_sizes = NULL;
	u32 *resolved_ids = NULL;
	u8 *visit_states = NULL;

	/* +1 for btf_void */
	resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
				  GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_sizes)
		goto nomem;

	resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
				GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_ids)
		goto nomem;

	visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
				GFP_KERNEL | __GFP_NOWARN);
	if (!visit_states)
		goto nomem;

	btf->resolved_sizes = resolved_sizes;
	btf->resolved_ids = resolved_ids;
	env->visit_states = visit_states;

	return 0;

nomem:
	kvfree(resolved_sizes);
	kvfree(resolved_ids);
	kvfree(visit_states);
	return -ENOMEM;
}

static void btf_verifier_env_free(struct btf_verifier_env *env)
{
	kvfree(env->visit_states);
	kfree(env);
}

static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
				     const struct btf_type *next_type)
{
	switch (env->resolve_mode) {
	case RESOLVE_TBD:
		/* int, enum or void is a sink */
		return !btf_type_needs_resolve(next_type);
	case RESOLVE_PTR:
		/* int, enum, void, struct or array is a sink for ptr */
		return !btf_type_is_modifier(next_type) &&
		       !btf_type_is_ptr(next_type);
	case RESOLVE_STRUCT_OR_ARRAY:
		/* int, enum, void or ptr is a sink for struct and array */
		return !btf_type_is_modifier(next_type) &&
		       !btf_type_is_array(next_type) &&
		       !btf_type_is_struct(next_type);
	default:
		BUG();
	}
}

static bool env_type_is_resolved(const struct btf_verifier_env *env,
				 u32 type_id)
{
	return env->visit_states[type_id] == RESOLVED;
}

static int env_stack_push(struct btf_verifier_env *env,
			  const struct btf_type *t, u32 type_id)
{
	struct resolve_vertex *v;

	if (env->top_stack == MAX_RESOLVE_DEPTH)
		return -E2BIG;

	if (env->visit_states[type_id] != NOT_VISITED)
		return -EEXIST;

	env->visit_states[type_id] = VISITED;

	v = &env->stack[env->top_stack++];
	v->t = t;
	v->type_id = type_id;
	v->next_member = 0;

	if (env->resolve_mode == RESOLVE_TBD) {
		if (btf_type_is_ptr(t))
			env->resolve_mode = RESOLVE_PTR;
		else if (btf_type_is_struct(t) || btf_type_is_array(t))
			env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
	}

	return 0;
}

static void env_stack_set_next_member(struct btf_verifier_env *env,
				      u16 next_member)
{
	env->stack[env->top_stack - 1].next_member = next_member;
}

static void env_stack_pop_resolved(struct btf_verifier_env *env,
				   u32 resolved_type_id,
				   u32 resolved_size)
{
	u32 type_id = env->stack[--(env->top_stack)].type_id;
	struct btf *btf = env->btf;

	btf->resolved_sizes[type_id] = resolved_size;
	btf->resolved_ids[type_id] = resolved_type_id;
	env->visit_states[type_id] = RESOLVED;
}

static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
{
	return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
}
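
/* Illustrative walk of the resolve stack for "const struct A *" where
 * struct A has already been resolved (the type ids are made up):
 *
 *   CONST is pushed (VISITED), resolve_mode stays RESOLVE_TBD
 *   PTR is pushed (VISITED), resolve_mode becomes RESOLVE_PTR
 *   struct A is a sink for RESOLVE_PTR, so PTR pops as RESOLVED
 *   CONST then pops as RESOLVED, with its resolved_ids/resolved_sizes
 *   recorded for later lookups such as btf_type_id_size().
 */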

/* The input param "type_id" must point to a needs_resolve type */
static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
						  u32 *type_id)
{
	*type_id = btf->resolved_ids[*type_id];
	return btf_type_by_id(btf, *type_id);
}

const struct btf_type *btf_type_id_size(const struct btf *btf,
					u32 *type_id, u32 *ret_size)
{
	const struct btf_type *size_type;
	u32 size_type_id = *type_id;
	u32 size = 0;

	size_type = btf_type_by_id(btf, size_type_id);
	if (btf_type_nosize_or_null(size_type))
		return NULL;

	if (btf_type_has_size(size_type)) {
		size = size_type->size;
	} else if (btf_type_is_array(size_type)) {
		size = btf->resolved_sizes[size_type_id];
	} else if (btf_type_is_ptr(size_type)) {
		size = sizeof(void *);
	} else {
		if (WARN_ON_ONCE(!btf_type_is_modifier(size_type)))
			return NULL;

		size = btf->resolved_sizes[size_type_id];
		size_type_id = btf->resolved_ids[size_type_id];
		size_type = btf_type_by_id(btf, size_type_id);
		if (btf_type_nosize_or_null(size_type))
			return NULL;
	}

	*type_id = size_type_id;
	if (ret_size)
		*ret_size = size;

	return size_type;
}

static int btf_df_check_member(struct btf_verifier_env *env,
			       const struct btf_type *struct_type,
			       const struct btf_member *member,
			       const struct btf_type *member_type)
{
	btf_verifier_log_basic(env, struct_type,
			       "Unsupported check_member");
	return -EINVAL;
}

static int btf_df_resolve(struct btf_verifier_env *env,
			  const struct resolve_vertex *v)
{
	btf_verifier_log_basic(env, v->t, "Unsupported resolve");
	return -EINVAL;
}

static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t,
			    u32 type_id, void *data, u8 bits_offsets,
			    struct seq_file *m)
{
	seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
}

static int btf_int_check_member(struct btf_verifier_env *env,
				const struct btf_type *struct_type,
				const struct btf_member *member,
				const struct btf_type *member_type)
{
	u32 int_data = btf_type_int(member_type);
	u32 struct_bits_off = member->offset;
	u32 struct_size = struct_type->size;
	u32 nr_copy_bits;
	u32 bytes_offset;

	if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
		btf_verifier_log_member(env, struct_type, member,
					"bits_offset exceeds U32_MAX");
		return -EINVAL;
	}

	struct_bits_off += BTF_INT_OFFSET(int_data);
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	nr_copy_bits = BTF_INT_BITS(int_data) +
		BITS_PER_BYTE_MASKED(struct_bits_off);

	if (nr_copy_bits > BITS_PER_U64) {
		btf_verifier_log_member(env, struct_type, member,
					"nr_copy_bits exceeds 64");
		return -EINVAL;
	}

	if (struct_size < bytes_offset ||
	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}

static s32 btf_int_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{
	u32 int_data, nr_bits, meta_needed = sizeof(int_data);
	u16 encoding;

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	int_data = btf_type_int(t);
	if (int_data & ~BTF_INT_MASK) {
		btf_verifier_log_basic(env, t, "Invalid int_data:%x",
				       int_data);
		return -EINVAL;
	}

	nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);

	if (nr_bits > BITS_PER_U64) {
		btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
				      BITS_PER_U64);
		return -EINVAL;
	}

	if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
		btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
		return -EINVAL;
	}

	/*
	 * Only one of the encoding bits is allowed and it
	 * should be sufficient for the pretty print purpose (i.e. decoding).
	 * Multiple bits can be allowed later if it is found
	 * to be insufficient.
	 */
	encoding = BTF_INT_ENCODING(int_data);
	if (encoding &&
	    encoding != BTF_INT_SIGNED &&
	    encoding != BTF_INT_CHAR &&
	    encoding != BTF_INT_BOOL) {
		btf_verifier_log_type(env, t, "Unsupported encoding");
		return -ENOTSUPP;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}

static void btf_int_log(struct btf_verifier_env *env,
			const struct btf_type *t)
{
	int int_data = btf_type_int(t);

	btf_verifier_log(env,
			 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
			 t->size, BTF_INT_OFFSET(int_data),
			 BTF_INT_BITS(int_data),
			 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
}

static void btf_int_bits_seq_show(const struct btf *btf,
				  const struct btf_type *t,
				  void *data, u8 bits_offset,
				  struct seq_file *m)
{
	u16 left_shift_bits, right_shift_bits;
	u32 int_data = btf_type_int(t);
	u8 nr_bits = BTF_INT_BITS(int_data);
	u8 total_bits_offset;
	u8 nr_copy_bytes;
	u8 nr_copy_bits;
	u64 print_num;

	/*
	 * bits_offset is at most 7.
	 * BTF_INT_OFFSET() cannot exceed 64 bits.
	 */
	total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
	data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
	bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
	nr_copy_bits = nr_bits + bits_offset;
	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);

	print_num = 0;
	memcpy(&print_num, data, nr_copy_bytes);

#ifdef __BIG_ENDIAN_BITFIELD
	left_shift_bits = bits_offset;
#else
	left_shift_bits = BITS_PER_U64 - nr_copy_bits;
#endif
	right_shift_bits = BITS_PER_U64 - nr_bits;

	print_num <<= left_shift_bits;
	print_num >>= right_shift_bits;

	seq_printf(m, "0x%llx", print_num);
}
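
/* A worked (illustrative) example of the extraction above on a
 * little-endian host: a 3-bit field stored at bits_offset 5.
 *
 *   nr_bits = 3, bits_offset = 5
 *   nr_copy_bits = 3 + 5 = 8, nr_copy_bytes = 1
 *   left_shift_bits  = 64 - 8 = 56  (drop the bits above the field)
 *   right_shift_bits = 64 - 3 = 61  (shift the field down to bit 0)
 *
 * so only the 3 interesting bits survive the two shifts and are
 * printed as a hex number.
 */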

static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
			     u32 type_id, void *data, u8 bits_offset,
			     struct seq_file *m)
{
	u32 int_data = btf_type_int(t);
	u8 encoding = BTF_INT_ENCODING(int_data);
	bool sign = encoding & BTF_INT_SIGNED;
	u8 nr_bits = BTF_INT_BITS(int_data);

	if (bits_offset || BTF_INT_OFFSET(int_data) ||
	    BITS_PER_BYTE_MASKED(nr_bits)) {
		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
		return;
	}

	switch (nr_bits) {
	case 64:
		if (sign)
			seq_printf(m, "%lld", *(s64 *)data);
		else
			seq_printf(m, "%llu", *(u64 *)data);
		break;
	case 32:
		if (sign)
			seq_printf(m, "%d", *(s32 *)data);
		else
			seq_printf(m, "%u", *(u32 *)data);
		break;
	case 16:
		if (sign)
			seq_printf(m, "%d", *(s16 *)data);
		else
			seq_printf(m, "%u", *(u16 *)data);
		break;
	case 8:
		if (sign)
			seq_printf(m, "%d", *(s8 *)data);
		else
			seq_printf(m, "%u", *(u8 *)data);
		break;
	default:
		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
	}
}

static const struct btf_kind_operations int_ops = {
	.check_meta = btf_int_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_int_check_member,
	.log_details = btf_int_log,
	.seq_show = btf_int_seq_show,
};

static int btf_modifier_check_member(struct btf_verifier_env *env,
				     const struct btf_type *struct_type,
				     const struct btf_member *member,
				     const struct btf_type *member_type)
{
	const struct btf_type *resolved_type;
	u32 resolved_type_id = member->type;
	struct btf_member resolved_member;
	struct btf *btf = env->btf;

	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
	if (!resolved_type) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member");
		return -EINVAL;
	}

	resolved_member = *member;
	resolved_member.type = resolved_type_id;

	return btf_type_ops(resolved_type)->check_member(env, struct_type,
							 &resolved_member,
							 resolved_type);
}

static int btf_ptr_check_member(struct btf_verifier_env *env,
				const struct btf_type *struct_type,
				const struct btf_member *member,
				const struct btf_type *member_type)
{
	u32 struct_size, struct_bits_off, bytes_offset;

	struct_size = struct_type->size;
	struct_bits_off = member->offset;
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);

	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member is not byte aligned");
		return -EINVAL;
	}

	if (struct_size - bytes_offset < sizeof(void *)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}

static int btf_ref_type_check_meta(struct btf_verifier_env *env,
				   const struct btf_type *t,
				   u32 meta_left)
{
	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (!BTF_TYPE_ID_VALID(t->type)) {
		btf_verifier_log_type(env, t, "Invalid type_id");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return 0;
}

static int btf_modifier_resolve(struct btf_verifier_env *env,
				const struct resolve_vertex *v)
{
	const struct btf_type *t = v->t;
	const struct btf_type *next_type;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;
	u32 next_type_size = 0;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	/* "typedef void new_void", "const void"...etc */
	if (btf_type_is_void(next_type) || btf_type_is_fwd(next_type))
		goto resolved;

	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	/* Figure out the resolved next_type_id with size.
	 * They will be stored in the current modifier's
	 * resolved_ids and resolved_sizes such that it can
	 * save us a few type-following steps when we use it later (e.g. in
	 * pretty print).
	 */
	if (!btf_type_id_size(btf, &next_type_id, &next_type_size) &&
	    !btf_type_nosize(btf_type_id_resolve(btf, &next_type_id))) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

resolved:
	env_stack_pop_resolved(env, next_type_id, next_type_size);

	return 0;
}

static int btf_ptr_resolve(struct btf_verifier_env *env,
			   const struct resolve_vertex *v)
{
	const struct btf_type *next_type;
	const struct btf_type *t = v->t;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;
	u32 next_type_size = 0;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	/* "void *" */
	if (btf_type_is_void(next_type) || btf_type_is_fwd(next_type))
		goto resolved;

	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
	 * the modifier may have stopped resolving when it was resolved
	 * to a ptr (last-resolved-ptr).
	 *
	 * We now need to continue from the last-resolved-ptr to
	 * ensure the last-resolved-ptr will not refer back to
	 * the current ptr (t).
	 */
	if (btf_type_is_modifier(next_type)) {
		const struct btf_type *resolved_type;
		u32 resolved_type_id;

		resolved_type_id = next_type_id;
		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);

		if (btf_type_is_ptr(resolved_type) &&
		    !env_type_is_resolve_sink(env, resolved_type) &&
		    !env_type_is_resolved(env, resolved_type_id))
			return env_stack_push(env, resolved_type,
					      resolved_type_id);
	}

	if (!btf_type_id_size(btf, &next_type_id, &next_type_size) &&
	    !btf_type_nosize(btf_type_id_resolve(btf, &next_type_id))) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

resolved:
	env_stack_pop_resolved(env, next_type_id, 0);

	return 0;
}

static void btf_modifier_seq_show(const struct btf *btf,
				  const struct btf_type *t,
				  u32 type_id, void *data,
				  u8 bits_offset, struct seq_file *m)
{
	t = btf_type_id_resolve(btf, &type_id);

	btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
}

static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
			     u32 type_id, void *data, u8 bits_offset,
			     struct seq_file *m)
{
	/* It is a hashed value */
	seq_printf(m, "%p", *(void **)data);
}

static void btf_ref_type_log(struct btf_verifier_env *env,
			     const struct btf_type *t)
{
	btf_verifier_log(env, "type_id=%u", t->type);
}

static struct btf_kind_operations modifier_ops = {
	.check_meta = btf_ref_type_check_meta,
	.resolve = btf_modifier_resolve,
	.check_member = btf_modifier_check_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_modifier_seq_show,
};

static struct btf_kind_operations ptr_ops = {
	.check_meta = btf_ref_type_check_meta,
	.resolve = btf_ptr_resolve,
	.check_member = btf_ptr_check_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_ptr_seq_show,
};

static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{
	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (t->type) {
		btf_verifier_log_type(env, t, "type != 0");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return 0;
}

static struct btf_kind_operations fwd_ops = {
	.check_meta = btf_fwd_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_df_check_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_df_seq_show,
};

static int btf_array_check_member(struct btf_verifier_env *env,
				  const struct btf_type *struct_type,
				  const struct btf_member *member,
				  const struct btf_type *member_type)
{
	u32 struct_bits_off = member->offset;
	u32 struct_size, bytes_offset;
	u32 array_type_id, array_size;
	struct btf *btf = env->btf;

	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member is not byte aligned");
		return -EINVAL;
	}

	array_type_id = member->type;
	btf_type_id_size(btf, &array_type_id, &array_size);
	struct_size = struct_type->size;
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	if (struct_size - bytes_offset < array_size) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}

static s32 btf_array_check_meta(struct btf_verifier_env *env,
				const struct btf_type *t,
				u32 meta_left)
{
	const struct btf_array *array = btf_type_array(t);
	u32 meta_needed = sizeof(*array);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (t->size) {
		btf_verifier_log_type(env, t, "size != 0");
		return -EINVAL;
	}

	/* Array elem type and index type cannot be in type void,
	 * so !array->type and !array->index_type are not allowed.
	 */
	if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
		btf_verifier_log_type(env, t, "Invalid elem");
		return -EINVAL;
	}

	if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
		btf_verifier_log_type(env, t, "Invalid index");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}

static int btf_array_resolve(struct btf_verifier_env *env,
			     const struct resolve_vertex *v)
{
	const struct btf_array *array = btf_type_array(v->t);
	const struct btf_type *elem_type, *index_type;
	u32 elem_type_id, index_type_id;
	struct btf *btf = env->btf;
	u32 elem_size;

	/* Check array->index_type */
	index_type_id = array->index_type;
	index_type = btf_type_by_id(btf, index_type_id);
	if (btf_type_nosize_or_null(index_type)) {
		btf_verifier_log_type(env, v->t, "Invalid index");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, index_type) &&
	    !env_type_is_resolved(env, index_type_id))
		return env_stack_push(env, index_type, index_type_id);

	index_type = btf_type_id_size(btf, &index_type_id, NULL);
	if (!index_type || !btf_type_is_int(index_type) ||
	    !btf_type_int_is_regular(index_type)) {
		btf_verifier_log_type(env, v->t, "Invalid index");
		return -EINVAL;
	}

	/* Check array->type */
	elem_type_id = array->type;
	elem_type = btf_type_by_id(btf, elem_type_id);
	if (btf_type_nosize_or_null(elem_type)) {
		btf_verifier_log_type(env, v->t,
				      "Invalid elem");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, elem_type) &&
	    !env_type_is_resolved(env, elem_type_id))
		return env_stack_push(env, elem_type, elem_type_id);

	elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
	if (!elem_type) {
		btf_verifier_log_type(env, v->t, "Invalid elem");
		return -EINVAL;
	}

	if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
		btf_verifier_log_type(env, v->t, "Invalid array of int");
		return -EINVAL;
	}

	if (array->nelems && elem_size > U32_MAX / array->nelems) {
		btf_verifier_log_type(env, v->t,
				      "Array size overflows U32_MAX");
		return -EINVAL;
	}

	env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);

	return 0;
}
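
/* Illustrative example: for "u32 a[10]" the element resolves to a
 * 4-byte regular int, so the array's resolved size recorded above is
 * 4 * 10 = 40 bytes, and the U32_MAX check guards against
 * elem_size * nelems overflowing.
 */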
1456
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001457static void btf_array_log(struct btf_verifier_env *env,
1458 const struct btf_type *t)
1459{
1460 const struct btf_array *array = btf_type_array(t);
1461
1462 btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
1463 array->type, array->index_type, array->nelems);
1464}
1465
Martin KaFai Laub00b8da2018-04-18 15:56:00 -07001466static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t,
1467 u32 type_id, void *data, u8 bits_offset,
1468 struct seq_file *m)
1469{
1470 const struct btf_array *array = btf_type_array(t);
1471 const struct btf_kind_operations *elem_ops;
1472 const struct btf_type *elem_type;
1473 u32 i, elem_size, elem_type_id;
1474
1475 elem_type_id = array->type;
1476 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
1477 elem_ops = btf_type_ops(elem_type);
1478 seq_puts(m, "[");
1479 for (i = 0; i < array->nelems; i++) {
1480 if (i)
1481 seq_puts(m, ",");
1482
1483 elem_ops->seq_show(btf, elem_type, elem_type_id, data,
1484 bits_offset, m);
1485 data += elem_size;
1486 }
1487 seq_puts(m, "]");
1488}
1489
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001490static struct btf_kind_operations array_ops = {
1491 .check_meta = btf_array_check_meta,
Martin KaFai Laueb3f5952018-04-18 15:55:58 -07001492 .resolve = btf_array_resolve,
Martin KaFai Lau179cde82018-04-18 15:55:59 -07001493 .check_member = btf_array_check_member,
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001494 .log_details = btf_array_log,
Martin KaFai Laub00b8da2018-04-18 15:56:00 -07001495 .seq_show = btf_array_seq_show,
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001496};
1497
Martin KaFai Lau179cde82018-04-18 15:55:59 -07001498static int btf_struct_check_member(struct btf_verifier_env *env,
1499 const struct btf_type *struct_type,
1500 const struct btf_member *member,
1501 const struct btf_type *member_type)
1502{
1503 u32 struct_bits_off = member->offset;
1504 u32 struct_size, bytes_offset;
1505
1506 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1507 btf_verifier_log_member(env, struct_type, member,
1508 "Member is not byte aligned");
1509 return -EINVAL;
1510 }
1511
1512 struct_size = struct_type->size;
1513 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1514 if (struct_size - bytes_offset < member_type->size) {
1515 btf_verifier_log_member(env, struct_type, member,
1516 "Member exceeds struct_size");
1517 return -EINVAL;
1518 }
1519
1520 return 0;
1521}
1522
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001523static s32 btf_struct_check_meta(struct btf_verifier_env *env,
1524 const struct btf_type *t,
1525 u32 meta_left)
1526{
1527 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
1528 const struct btf_member *member;
Martin KaFai Lau6283fa32018-07-20 17:38:37 -07001529 u32 meta_needed, last_offset;
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001530 struct btf *btf = env->btf;
1531 u32 struct_size = t->size;
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001532 u16 i;
1533
1534 meta_needed = btf_type_vlen(t) * sizeof(*member);
1535 if (meta_left < meta_needed) {
1536 btf_verifier_log_basic(env, t,
1537 "meta_left:%u meta_needed:%u",
1538 meta_left, meta_needed);
1539 return -EINVAL;
1540 }
1541
1542 btf_verifier_log_type(env, t, NULL);
1543
Martin KaFai Lau6283fa32018-07-20 17:38:37 -07001544 last_offset = 0;
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001545 for_each_member(i, t, member) {
Martin KaFai Laufbcf93e2018-04-21 09:48:23 -07001546 if (!btf_name_offset_valid(btf, member->name_off)) {
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001547 btf_verifier_log_member(env, t, member,
1548 "Invalid member name_offset:%u",
Martin KaFai Laufbcf93e2018-04-21 09:48:23 -07001549 member->name_off);
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001550 return -EINVAL;
1551 }
1552
1553 /* A member cannot be in type void */
Martin KaFai Lauaea2f7b82018-05-22 14:57:20 -07001554 if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001555 btf_verifier_log_member(env, t, member,
1556 "Invalid type_id");
1557 return -EINVAL;
1558 }
1559
1560 if (is_union && member->offset) {
1561 btf_verifier_log_member(env, t, member,
1562 "Invalid member bits_offset");
1563 return -EINVAL;
1564 }
1565
Martin KaFai Lau6283fa32018-07-20 17:38:37 -07001566 /*
1567 * ">" instead of ">=" because the last member could be
1568 * "char a[0];"
1569 */
1570 if (last_offset > member->offset) {
1571 btf_verifier_log_member(env, t, member,
1572 "Invalid member bits_offset");
1573 return -EINVAL;
1574 }
1575
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001576 if (BITS_ROUNDUP_BYTES(member->offset) > struct_size) {
1577 btf_verifier_log_member(env, t, member,
1578 "Memmber bits_offset exceeds its struct size");
1579 return -EINVAL;
1580 }
1581
1582 btf_verifier_log_member(env, t, member, NULL);
Martin KaFai Lau6283fa32018-07-20 17:38:37 -07001583 last_offset = member->offset;
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001584 }
1585
1586 return meta_needed;
1587}

static int btf_struct_resolve(struct btf_verifier_env *env,
			      const struct resolve_vertex *v)
{
	const struct btf_member *member;
	int err;
	u16 i;

	/* Before continuing to resolve the next_member,
	 * ensure the last member is indeed resolved to a
	 * type with size info.
	 */
	if (v->next_member) {
		const struct btf_type *last_member_type;
		const struct btf_member *last_member;
		u16 last_member_type_id;

		last_member = btf_type_member(v->t) + v->next_member - 1;
		last_member_type_id = last_member->type;
		if (WARN_ON_ONCE(!env_type_is_resolved(env,
						       last_member_type_id)))
			return -EINVAL;

		last_member_type = btf_type_by_id(env->btf,
						  last_member_type_id);
		err = btf_type_ops(last_member_type)->check_member(env, v->t,
							last_member,
							last_member_type);
		if (err)
			return err;
	}

	for_each_member_from(i, v->next_member, v->t, member) {
		u32 member_type_id = member->type;
		const struct btf_type *member_type = btf_type_by_id(env->btf,
								member_type_id);

		if (btf_type_nosize_or_null(member_type)) {
			btf_verifier_log_member(env, v->t, member,
						"Invalid member");
			return -EINVAL;
		}

		if (!env_type_is_resolve_sink(env, member_type) &&
		    !env_type_is_resolved(env, member_type_id)) {
			env_stack_set_next_member(env, i + 1);
			return env_stack_push(env, member_type, member_type_id);
		}

		err = btf_type_ops(member_type)->check_member(env, v->t,
							      member,
							      member_type);
		if (err)
			return err;
	}

	env_stack_pop_resolved(env, 0, 0);

	return 0;
}
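
/* A rough sketch of the resolve order (illustrative, with made-up ids):
 * given
 *
 *	[1] INT 'int' size=4
 *	[2] TYPEDEF 'myint' type_id=1
 *	[3] STRUCT 's' size=4 vlen=1
 *		'a' type_id=2 bits_offset=0
 *
 * resolving [3] finds member 'a' whose type [2] is a modifier, i.e.
 * neither a resolve sink nor already resolved, so the struct records
 * where it stopped (next_member = i + 1) and pushes [2] onto the
 * resolve stack. Once [2] is resolved down to the sized 'int', the
 * struct vertex is revisited, the just-resolved member is re-run
 * through check_member(), and the walk continues until every member
 * has a sized, resolved type and the struct itself is popped as
 * resolved.
 */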
1648
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001649static void btf_struct_log(struct btf_verifier_env *env,
1650 const struct btf_type *t)
1651{
1652 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
1653}
1654
Martin KaFai Laub00b8da2018-04-18 15:56:00 -07001655static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
1656 u32 type_id, void *data, u8 bits_offset,
1657 struct seq_file *m)
1658{
1659 const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? "|" : ",";
1660 const struct btf_member *member;
1661 u32 i;
1662
1663 seq_puts(m, "{");
1664 for_each_member(i, t, member) {
1665 const struct btf_type *member_type = btf_type_by_id(btf,
1666 member->type);
1667 u32 member_offset = member->offset;
1668 u32 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
1669 u8 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
1670 const struct btf_kind_operations *ops;
1671
1672 if (i)
1673 seq_puts(m, seq);
1674
1675 ops = btf_type_ops(member_type);
1676 ops->seq_show(btf, member_type, member->type,
1677 data + bytes_offset, bits8_offset, m);
1678 }
1679 seq_puts(m, "}");
1680}
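
/* Example output (a sketch): for
 *
 *	struct s { int a; int b; } v = { 1, 2 };
 *
 * btf_struct_seq_show() emits "{1,2}" to the seq_file. A union uses "|"
 * as the separator instead, with every member decoded from the same
 * starting offset of the object.
 */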

static struct btf_kind_operations struct_ops = {
	.check_meta = btf_struct_check_meta,
	.resolve = btf_struct_resolve,
	.check_member = btf_struct_check_member,
	.log_details = btf_struct_log,
	.seq_show = btf_struct_seq_show,
};

static int btf_enum_check_member(struct btf_verifier_env *env,
				 const struct btf_type *struct_type,
				 const struct btf_member *member,
				 const struct btf_type *member_type)
{
	u32 struct_bits_off = member->offset;
	u32 struct_size, bytes_offset;

	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member is not byte aligned");
		return -EINVAL;
	}

	struct_size = struct_type->size;
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	if (struct_size - bytes_offset < sizeof(int)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}

static s32 btf_enum_check_meta(struct btf_verifier_env *env,
			       const struct btf_type *t,
			       u32 meta_left)
{
	const struct btf_enum *enums = btf_type_enum(t);
	struct btf *btf = env->btf;
	u16 i, nr_enums;
	u32 meta_needed;

	nr_enums = btf_type_vlen(t);
	meta_needed = nr_enums * sizeof(*enums);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (t->size != sizeof(int)) {
		btf_verifier_log_type(env, t, "Expected size:%zu",
				      sizeof(int));
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	for (i = 0; i < nr_enums; i++) {
		if (!btf_name_offset_valid(btf, enums[i].name_off)) {
			btf_verifier_log(env, "\tInvalid name_offset:%u",
					 enums[i].name_off);
			return -EINVAL;
		}

		btf_verifier_log(env, "\t%s val=%d\n",
				 btf_name_by_offset(btf, enums[i].name_off),
				 enums[i].val);
	}

	return meta_needed;
}
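
/* Illustrative encoding (verifier-log style, ids made up): the C enum
 *
 *	enum e { A = 0, B = 1 };
 *
 * becomes
 *
 *	[5] ENUM 'e' size=4 vlen=2
 *		'A' val=0
 *		'B' val=1
 *
 * btf_enum_check_meta() expects vlen * sizeof(struct btf_enum) bytes of
 * trailing metadata, a t->size of exactly sizeof(int), and a valid
 * name_off for every enumerator.
 */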

static void btf_enum_log(struct btf_verifier_env *env,
			 const struct btf_type *t)
{
	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
}

static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
			      u32 type_id, void *data, u8 bits_offset,
			      struct seq_file *m)
{
	const struct btf_enum *enums = btf_type_enum(t);
	u32 i, nr_enums = btf_type_vlen(t);
	int v = *(int *)data;

	for (i = 0; i < nr_enums; i++) {
		if (v == enums[i].val) {
			seq_printf(m, "%s",
				   btf_name_by_offset(btf, enums[i].name_off));
			return;
		}
	}

	seq_printf(m, "%d", v);
}
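
/* Example (a sketch): with the 'enum e' encoding above, a stored value
 * of 1 is printed as "B", while a value such as 7 that matches no
 * enumerator falls back to the raw integer and is printed as "7".
 */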

static struct btf_kind_operations enum_ops = {
	.check_meta = btf_enum_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_enum_check_member,
	.log_details = btf_enum_log,
	.seq_show = btf_enum_seq_show,
};

static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
	[BTF_KIND_INT] = &int_ops,
	[BTF_KIND_PTR] = &ptr_ops,
	[BTF_KIND_ARRAY] = &array_ops,
	[BTF_KIND_STRUCT] = &struct_ops,
	[BTF_KIND_UNION] = &struct_ops,
	[BTF_KIND_ENUM] = &enum_ops,
	[BTF_KIND_FWD] = &fwd_ops,
	[BTF_KIND_TYPEDEF] = &modifier_ops,
	[BTF_KIND_VOLATILE] = &modifier_ops,
	[BTF_KIND_CONST] = &modifier_ops,
	[BTF_KIND_RESTRICT] = &modifier_ops,
};

static s32 btf_check_meta(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left)
{
	u32 saved_meta_left = meta_left;
	s32 var_meta_size;

	if (meta_left < sizeof(*t)) {
		btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
				 env->log_type_id, meta_left, sizeof(*t));
		return -EINVAL;
	}
	meta_left -= sizeof(*t);

	if (t->info & ~BTF_INFO_MASK) {
		btf_verifier_log(env, "[%u] Invalid btf_info:%x",
				 env->log_type_id, t->info);
		return -EINVAL;
	}

	if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
	    BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
		btf_verifier_log(env, "[%u] Invalid kind:%u",
				 env->log_type_id, BTF_INFO_KIND(t->info));
		return -EINVAL;
	}

	if (!btf_name_offset_valid(env->btf, t->name_off)) {
		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
				 env->log_type_id, t->name_off);
		return -EINVAL;
	}

	var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
	if (var_meta_size < 0)
		return var_meta_size;

	meta_left -= var_meta_size;

	return saved_meta_left - meta_left;
}
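
/* A worked example (a sketch assuming the original 12-byte layouts of
 * struct btf_type and struct btf_member): a STRUCT with vlen=2 needs
 *
 *	sizeof(struct btf_type) + 2 * sizeof(struct btf_member)
 *	= 12 + 2 * 12 = 36 bytes
 *
 * of metadata. btf_check_meta() accounts for the fixed btf_type part
 * itself, then delegates the variable part to the kind-specific
 * check_meta() (btf_struct_check_meta() in this example); the combined
 * size it returns tells btf_check_all_metas() how far to advance to
 * reach the next type.
 */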

static int btf_check_all_metas(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	struct btf_header *hdr;
	void *cur, *end;

	hdr = &btf->hdr;
	cur = btf->nohdr_data + hdr->type_off;
	end = cur + hdr->type_len;

	env->log_type_id = 1;
	while (cur < end) {
		struct btf_type *t = cur;
		s32 meta_size;

		meta_size = btf_check_meta(env, t, end - cur);
		if (meta_size < 0)
			return meta_size;

		btf_add_type(env, t);
		cur += meta_size;
		env->log_type_id++;
	}

	return 0;
}

static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id)
{
	const struct resolve_vertex *v;
	int err = 0;

	env->resolve_mode = RESOLVE_TBD;
	env_stack_push(env, t, type_id);
	while (!err && (v = env_stack_peak(env))) {
		env->log_type_id = v->type_id;
		err = btf_type_ops(v->t)->resolve(env, v);
	}

	env->log_type_id = type_id;
	if (err == -E2BIG)
		btf_verifier_log_type(env, t,
				      "Exceeded max resolving depth:%u",
				      MAX_RESOLVE_DEPTH);
	else if (err == -EEXIST)
		btf_verifier_log_type(env, t, "Loop detected");

	return err;
}
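
/* Sketch of the error mapping (illustrative): a push onto the resolve
 * stack fails with -E2BIG once the stack already holds
 * MAX_RESOLVE_DEPTH vertices, e.g. for a typedef-of-typedef chain
 * deeper than that limit, and with -EEXIST when a type_id that is
 * already being visited is pushed again, e.g. a modifier chain that
 * eventually refers back to itself. Either way the failure is logged
 * here against the type that started the resolution.
 */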

static bool btf_resolve_valid(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 type_id)
{
	struct btf *btf = env->btf;

	if (!env_type_is_resolved(env, type_id))
		return false;

	if (btf_type_is_struct(t))
		return !btf->resolved_ids[type_id] &&
		       !btf->resolved_sizes[type_id];

	if (btf_type_is_modifier(t) || btf_type_is_ptr(t)) {
		t = btf_type_id_resolve(btf, &type_id);
		return t && !btf_type_is_modifier(t);
	}

	if (btf_type_is_array(t)) {
		const struct btf_array *array = btf_type_array(t);
		const struct btf_type *elem_type;
		u32 elem_type_id = array->type;
		u32 elem_size;

		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
		return elem_type && !btf_type_is_modifier(elem_type) &&
		       (array->nelems * elem_size ==
			btf->resolved_sizes[type_id]);
	}

	return false;
}

static int btf_check_all_types(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	u32 type_id;
	int err;

	err = env_resolve_init(env);
	if (err)
		return err;

	env->phase++;
	for (type_id = 1; type_id <= btf->nr_types; type_id++) {
		const struct btf_type *t = btf_type_by_id(btf, type_id);

		env->log_type_id = type_id;
		if (btf_type_needs_resolve(t) &&
		    !env_type_is_resolved(env, type_id)) {
			err = btf_resolve(env, t, type_id);
			if (err)
				return err;
		}

		if (btf_type_needs_resolve(t) &&
		    !btf_resolve_valid(env, t, type_id)) {
			btf_verifier_log_type(env, t, "Invalid resolve state");
			return -EINVAL;
		}
	}

	return 0;
}
1961
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001962static int btf_parse_type_sec(struct btf_verifier_env *env)
1963{
Martin KaFai Lauf80442a2018-05-22 14:57:18 -07001964 const struct btf_header *hdr = &env->btf->hdr;
Martin KaFai Laueb3f5952018-04-18 15:55:58 -07001965 int err;
1966
Martin KaFai Lauf80442a2018-05-22 14:57:18 -07001967 /* Type section must align to 4 bytes */
1968 if (hdr->type_off & (sizeof(u32) - 1)) {
1969 btf_verifier_log(env, "Unaligned type_off");
1970 return -EINVAL;
1971 }
1972
1973 if (!hdr->type_len) {
1974 btf_verifier_log(env, "No type found");
1975 return -EINVAL;
1976 }
1977
Martin KaFai Laueb3f5952018-04-18 15:55:58 -07001978 err = btf_check_all_metas(env);
1979 if (err)
1980 return err;
1981
1982 return btf_check_all_types(env);
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001983}
1984
1985static int btf_parse_str_sec(struct btf_verifier_env *env)
1986{
1987 const struct btf_header *hdr;
1988 struct btf *btf = env->btf;
1989 const char *start, *end;
1990
Martin KaFai Lauf80442a2018-05-22 14:57:18 -07001991 hdr = &btf->hdr;
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07001992 start = btf->nohdr_data + hdr->str_off;
1993 end = start + hdr->str_len;
1994
Martin KaFai Lauf80442a2018-05-22 14:57:18 -07001995 if (end != btf->data + btf->data_size) {
1996 btf_verifier_log(env, "String section is not at the end");
1997 return -EINVAL;
1998 }
1999
Martin KaFai Lau69b693f2018-04-18 15:55:57 -07002000 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
2001 start[0] || end[-1]) {
2002 btf_verifier_log(env, "Invalid string section");
2003 return -EINVAL;
2004 }
2005
2006 btf->strings = start;
2007
2008 return 0;
2009}

static const size_t btf_sec_info_offset[] = {
	offsetof(struct btf_header, type_off),
	offsetof(struct btf_header, str_off),
};

static int btf_sec_info_cmp(const void *a, const void *b)
{
	const struct btf_sec_info *x = a;
	const struct btf_sec_info *y = b;

	return (int)(x->off - y->off) ? : (int)(x->len - y->len);
}

static int btf_check_sec_info(struct btf_verifier_env *env,
			      u32 btf_data_size)
{
	struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
	u32 total, expected_total, i;
	const struct btf_header *hdr;
	const struct btf *btf;

	btf = env->btf;
	hdr = &btf->hdr;

	/* Populate the secs from hdr */
	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
		secs[i] = *(struct btf_sec_info *)((void *)hdr +
						   btf_sec_info_offset[i]);

	sort(secs, ARRAY_SIZE(btf_sec_info_offset),
	     sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);

	/* Check for gaps and overlap among sections */
	total = 0;
	expected_total = btf_data_size - hdr->hdr_len;
	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
		if (expected_total < secs[i].off) {
			btf_verifier_log(env, "Invalid section offset");
			return -EINVAL;
		}
		if (total < secs[i].off) {
			/* gap */
			btf_verifier_log(env, "Unsupported section found");
			return -EINVAL;
		}
		if (total > secs[i].off) {
			btf_verifier_log(env, "Section overlap found");
			return -EINVAL;
		}
		if (expected_total - total < secs[i].len) {
			btf_verifier_log(env,
					 "Total section length too long");
			return -EINVAL;
		}
		total += secs[i].len;
	}

	/* There is data other than hdr and known sections */
	if (expected_total != total) {
		btf_verifier_log(env, "Unsupported section found");
		return -EINVAL;
	}

	return 0;
}
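
/* Example of the only accepted layout (a sketch with made-up sizes):
 * for a 100-byte blob whose header claims hdr_len=24,
 *
 *	[ btf_header: 24 ][ type section: 60 bytes ][ string section: 16 ]
 *	type_off=0  type_len=60  str_off=60  str_len=16
 *
 * Section offsets are relative to the end of the header, and the two
 * sections must tile the remaining expected_total = 100 - 24 = 76
 * bytes exactly; any gap, overlap, or unknown trailing data is
 * rejected above.
 */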

static int btf_parse_hdr(struct btf_verifier_env *env)
{
	u32 hdr_len, hdr_copy, btf_data_size;
	const struct btf_header *hdr;
	struct btf *btf;
	int err;

	btf = env->btf;
	btf_data_size = btf->data_size;

	if (btf_data_size <
	    offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
		btf_verifier_log(env, "hdr_len not found");
		return -EINVAL;
	}

	hdr = btf->data;
	hdr_len = hdr->hdr_len;
	if (btf_data_size < hdr_len) {
		btf_verifier_log(env, "btf_header not found");
		return -EINVAL;
	}

	/* Ensure the unsupported header fields are zero */
	if (hdr_len > sizeof(btf->hdr)) {
		u8 *expected_zero = btf->data + sizeof(btf->hdr);
		u8 *end = btf->data + hdr_len;

		for (; expected_zero < end; expected_zero++) {
			if (*expected_zero) {
				btf_verifier_log(env, "Unsupported btf_header");
				return -E2BIG;
			}
		}
	}

	hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
	memcpy(&btf->hdr, btf->data, hdr_copy);

	hdr = &btf->hdr;

	btf_verifier_log_hdr(env, btf_data_size);

	if (hdr->magic != BTF_MAGIC) {
		btf_verifier_log(env, "Invalid magic");
		return -EINVAL;
	}

	if (hdr->version != BTF_VERSION) {
		btf_verifier_log(env, "Unsupported version");
		return -ENOTSUPP;
	}

	if (hdr->flags) {
		btf_verifier_log(env, "Unsupported flags");
		return -ENOTSUPP;
	}

	if (btf_data_size == hdr->hdr_len) {
		btf_verifier_log(env, "No data");
		return -EINVAL;
	}

	err = btf_check_sec_info(env, btf_data_size);
	if (err)
		return err;

	return 0;
}
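
/* A header that passes the checks above would look roughly like this
 * (a sketch; the constants reflect the BTF UAPI at the time of
 * writing):
 *
 *	magic   = 0xeB9F	(BTF_MAGIC)
 *	version = 1		(BTF_VERSION)
 *	flags   = 0
 *	hdr_len = sizeof(struct btf_header)
 *
 * A larger hdr_len from a newer userspace is tolerated only if every
 * byte beyond the fields this kernel understands is zero; otherwise
 * the header is rejected with -E2BIG as "Unsupported btf_header".
 */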

static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
			     u32 log_level, char __user *log_ubuf, u32 log_size)
{
	struct btf_verifier_env *env = NULL;
	struct bpf_verifier_log *log;
	struct btf *btf = NULL;
	u8 *data;
	int err;

	if (btf_data_size > BTF_MAX_SIZE)
		return ERR_PTR(-E2BIG);

	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
	if (!env)
		return ERR_PTR(-ENOMEM);

	log = &env->log;
	if (log_level || log_ubuf || log_size) {
		/* user requested verbose verifier output
		 * and supplied buffer to store the verification trace
		 */
		log->level = log_level;
		log->ubuf = log_ubuf;
		log->len_total = log_size;

		/* log attributes have to be sane */
		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
		    !log->level || !log->ubuf) {
			err = -EINVAL;
			goto errout;
		}
	}

	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
	if (!btf) {
		err = -ENOMEM;
		goto errout;
	}
	env->btf = btf;

	data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
	if (!data) {
		err = -ENOMEM;
		goto errout;
	}

	btf->data = data;
	btf->data_size = btf_data_size;

	if (copy_from_user(data, btf_data, btf_data_size)) {
		err = -EFAULT;
		goto errout;
	}

	err = btf_parse_hdr(env);
	if (err)
		goto errout;

	btf->nohdr_data = btf->data + btf->hdr.hdr_len;

	err = btf_parse_str_sec(env);
	if (err)
		goto errout;

	err = btf_parse_type_sec(env);
	if (err)
		goto errout;

	if (log->level && bpf_verifier_log_full(log)) {
		err = -ENOSPC;
		goto errout;
	}

	btf_verifier_env_free(env);
	refcount_set(&btf->refcnt, 1);
	return btf;

errout:
	btf_verifier_env_free(env);
	if (btf)
		btf_free(btf);
	return ERR_PTR(err);
}
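
/* Userspace counterpart, as a hedged sketch (the ptr_to_u64() helper
 * and buffer names are hypothetical; only the BPF_BTF_LOAD command and
 * the btf* fields of union bpf_attr come from the bpf UAPI):
 *
 *	union bpf_attr attr = {};
 *	char log_buf[4096];
 *
 *	attr.btf = ptr_to_u64(raw_btf);		// the raw ".BTF" blob
 *	attr.btf_size = raw_btf_size;
 *	attr.btf_log_buf = ptr_to_u64(log_buf);
 *	attr.btf_log_size = sizeof(log_buf);
 *	attr.btf_log_level = 1;
 *
 *	btf_fd = syscall(__NR_bpf, BPF_BTF_LOAD, &attr, sizeof(attr));
 *
 * On success the returned fd holds the initial reference set by
 * refcount_set() above; closing the fd drops it via btf_release().
 */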

void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
		       struct seq_file *m)
{
	const struct btf_type *t = btf_type_by_id(btf, type_id);

	btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
}

static int btf_release(struct inode *inode, struct file *filp)
{
	btf_put(filp->private_data);
	return 0;
}

const struct file_operations btf_fops = {
	.release = btf_release,
};

static int __btf_new_fd(struct btf *btf)
{
	return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
}

int btf_new_fd(const union bpf_attr *attr)
{
	struct btf *btf;
	int ret;

	btf = btf_parse(u64_to_user_ptr(attr->btf),
			attr->btf_size, attr->btf_log_level,
			u64_to_user_ptr(attr->btf_log_buf),
			attr->btf_log_size);
	if (IS_ERR(btf))
		return PTR_ERR(btf);

	ret = btf_alloc_id(btf);
	if (ret) {
		btf_free(btf);
		return ret;
	}

	/*
	 * The BTF ID is published to userspace.
	 * All BTF frees must go through call_rcu() from
	 * now on (i.e. free by calling btf_put()).
	 */

	ret = __btf_new_fd(btf);
	if (ret < 0)
		btf_put(btf);

	return ret;
}

struct btf *btf_get_by_fd(int fd)
{
	struct btf *btf;
	struct fd f;

	f = fdget(fd);

	if (!f.file)
		return ERR_PTR(-EBADF);

	if (f.file->f_op != &btf_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	btf = f.file->private_data;
	refcount_inc(&btf->refcnt);
	fdput(f);

	return btf;
}

int btf_get_info_by_fd(const struct btf *btf,
		       const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	struct bpf_btf_info __user *uinfo;
	struct bpf_btf_info info = {};
	u32 info_copy, btf_copy;
	void __user *ubtf;
	u32 uinfo_len;

	uinfo = u64_to_user_ptr(attr->info.info);
	uinfo_len = attr->info.info_len;

	info_copy = min_t(u32, uinfo_len, sizeof(info));
	if (copy_from_user(&info, uinfo, info_copy))
		return -EFAULT;

	info.id = btf->id;
	ubtf = u64_to_user_ptr(info.btf);
	btf_copy = min_t(u32, btf->data_size, info.btf_size);
	if (copy_to_user(ubtf, btf->data, btf_copy))
		return -EFAULT;
	info.btf_size = btf->data_size;

	if (copy_to_user(uinfo, &info, info_copy) ||
	    put_user(info_copy, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}
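
/* Userspace side, sketched under the same caveats as the BPF_BTF_LOAD
 * example above (ptr_to_u64() and the buffer names are hypothetical):
 * callers commonly probe twice, first with btf_size = 0 to learn the
 * required size, then again with a large enough buffer:
 *
 *	struct bpf_btf_info info = {};
 *	union bpf_attr attr = {};
 *
 *	info.btf = ptr_to_u64(out_buf);
 *	info.btf_size = out_buf_size;
 *	attr.info.bpf_fd = btf_fd;
 *	attr.info.info = ptr_to_u64(&info);
 *	attr.info.info_len = sizeof(info);
 *
 *	err = syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 *
 * On return info.id carries the BTF ID and info.btf_size is rewritten
 * to the full data_size, so a truncated copy can be detected.
 */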

int btf_get_fd_by_id(u32 id)
{
	struct btf *btf;
	int fd;

	rcu_read_lock();
	btf = idr_find(&btf_idr, id);
	if (!btf || !refcount_inc_not_zero(&btf->refcnt))
		btf = ERR_PTR(-ENOENT);
	rcu_read_unlock();

	if (IS_ERR(btf))
		return PTR_ERR(btf);

	fd = __btf_new_fd(btf);
	if (fd < 0)
		btf_put(btf);

	return fd;
}

u32 btf_id(const struct btf *btf)
{
	return btf->id;
}