/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include "trace.h"

static DEFINE_PER_CPU(int, bpf_prog_active);

/**
 * trace_call_bpf - invoke BPF program
 * @prog: BPF program
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer, which the kprobe handler
 * interprets as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1.
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
        unsigned int ret;

        if (in_nmi()) /* not supported yet */
                return 1;

        preempt_disable();

        if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
                /*
                 * A BPF program is already running on this CPU, so don't
                 * recurse into another BPF program (the same one or a
                 * different one) and don't emit a kprobe event into the
                 * ring buffer; return zero instead.
                 */
                ret = 0;
                goto out;
        }

        rcu_read_lock();
        ret = BPF_PROG_RUN(prog, ctx);
        rcu_read_unlock();

 out:
        __this_cpu_dec(bpf_prog_active);
        preempt_enable();

        return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
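
/*
 * Illustrative sketch, not part of this file: the kprobe dispatch code
 * in the perf/ftrace path is expected to consume the return value
 * roughly as below; 'tk' and its 'prog' field are hypothetical names
 * for whatever structure carries the attached program.
 *
 *	if (tk->prog && !trace_call_bpf(tk->prog, regs))
 *		return;			(0: event filtered out, skip it)
 *	store_kprobe_event(...);	(1: emit into the ring buffer)
 */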
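/*
 * Helper calling convention: the interpreter hands every helper its
 * arguments in the five generic u64 slots r1-r5, and each helper casts
 * them back to the real types declared in its bpf_func_proto.
 */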
static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
        void *dst = (void *) (long) r1;
        int size = (int) r2;
        void *unsafe_ptr = (void *) (long) r3;

        return probe_kernel_read(dst, unsafe_ptr, size);
}

static const struct bpf_func_proto bpf_probe_read_proto = {
        .func           = bpf_probe_read,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_STACK,
        .arg2_type      = ARG_CONST_STACK_SIZE,
        .arg3_type      = ARG_ANYTHING,
};
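
/*
 * Illustrative sketch, not part of this file: how a program compiled
 * with LLVM would typically call this helper from restricted C. The
 * PT_REGS_PARM1() macro and the variable names are assumptions
 * borrowed from the samples, not definitions made here.
 *
 *	void *ptr;
 *
 *	bpf_probe_read(&ptr, sizeof(ptr), (void *) PT_REGS_PARM1(ctx));
 *
 * The verifier enforces the proto above: the destination must point
 * into the program's own stack (ARG_PTR_TO_STACK), the size must be a
 * constant that fits inside that stack area (ARG_CONST_STACK_SIZE),
 * and the unsafe kernel pointer may hold any value (ARG_ANYTHING).
 */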

static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
        /* NMI safe access to clock monotonic */
        return ktime_get_mono_fast_ns();
}

static const struct bpf_func_proto bpf_ktime_get_ns_proto = {
        .func           = bpf_ktime_get_ns,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
};
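
/*
 * Illustrative sketch, not part of this file: a common use of this
 * helper is latency measurement between an entry and a return probe,
 * with the entry timestamp parked in a map ('start', 'req' and 'delta'
 * are hypothetical names):
 *
 *	entry probe:	u64 ts = bpf_ktime_get_ns();
 *			bpf_map_update_elem(&start, &req, &ts, BPF_ANY);
 *
 *	return probe:	u64 *tsp = bpf_map_lookup_elem(&start, &req);
 *			if (tsp)
 *				delta = bpf_ktime_get_ns() - *tsp;
 */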

static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
        switch (func_id) {
        case BPF_FUNC_map_lookup_elem:
                return &bpf_map_lookup_elem_proto;
        case BPF_FUNC_map_update_elem:
                return &bpf_map_update_elem_proto;
        case BPF_FUNC_map_delete_elem:
                return &bpf_map_delete_elem_proto;
        case BPF_FUNC_probe_read:
                return &bpf_probe_read_proto;
        case BPF_FUNC_ktime_get_ns:
                return &bpf_ktime_get_ns_proto;
        default:
                return NULL;
        }
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type)
{
        /* check bounds */
        if (off < 0 || off >= sizeof(struct pt_regs))
                return false;

        /* only read is allowed */
        if (type != BPF_READ)
                return false;

        /* disallow misaligned access */
        if (off % size != 0)
                return false;

        return true;
}
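
/*
 * Illustrative examples, not part of this file, of what the check
 * above means for the verifier (the offsets are for x86_64, where
 * regs->ip happens to sit at an 8-byte-aligned offset):
 *
 *	r0 = *(u64 *)(ctx + 128)	allowed: aligned read of regs->ip
 *	*(u64 *)(ctx + 0) = r0		rejected: writes are not allowed
 *	r0 = *(u64 *)(ctx + 3)		rejected: offset not a multiple
 *					of the access size
 */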

static struct bpf_verifier_ops kprobe_prog_ops = {
        .get_func_proto         = kprobe_prog_func_proto,
        .is_valid_access        = kprobe_prog_is_valid_access,
};

static struct bpf_prog_type_list kprobe_tl = {
        .ops    = &kprobe_prog_ops,
        .type   = BPF_PROG_TYPE_KPROBE,
};

static int __init register_kprobe_prog_ops(void)
{
        bpf_register_prog_type(&kprobe_tl);
        return 0;
}
late_initcall(register_kprobe_prog_ops);
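
/*
 * Illustrative sketch, not part of this file: once the type is
 * registered, userspace loads such a program with the bpf(2) syscall
 * and attaches it to a perf kprobe event with PERF_EVENT_IOC_SET_BPF.
 * ptr_to_u64() is a hypothetical helper that casts a pointer to __u64.
 *
 *	union bpf_attr attr = {
 *		.prog_type    = BPF_PROG_TYPE_KPROBE,
 *		.insns        = ptr_to_u64(insns),
 *		.insn_cnt     = insn_cnt,
 *		.license      = ptr_to_u64("GPL"),
 *		.kern_version = LINUX_VERSION_CODE, (required for this type)
 *	};
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 *	ioctl(perf_event_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
 */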