/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include "trace.h"

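/*
 * Per-CPU recursion guard: non-zero while a BPF program is running on
 * this CPU. Checked in trace_call_bpf() to avoid recursing into another
 * BPF program from a kprobe that fires inside one.
 */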
static DEFINE_PER_CPU(int, bpf_prog_active);

/**
 * trace_call_bpf - invoke BPF program
 * @prog: BPF program
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * It can also be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * the kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * A BPF program is already running on this CPU, so don't
		 * recurse into another BPF program (the same one or a
		 * different one) and don't send a kprobe event into the
		 * ring buffer; return zero instead.
		 */
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
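
/*
 * Illustrative sketch, not part of this file: a kprobe handler (e.g.
 * kprobe_perf_func() in kernel/trace/trace_kprobe.c) is expected to use
 * trace_call_bpf() roughly like this:
 *
 *	if (prog && !trace_call_bpf(prog, regs))
 *		return;		<- event filtered out by the BPF program
 *	... otherwise store the kprobe event into the ring buffer ...
 */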

/*
 * Helper arguments arrive in BPF's fixed r1-r5 register convention;
 * unpack the three this helper actually uses.
 */
static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	void *dst = (void *) (long) r1;
	int size = (int) r2;
	void *unsafe_ptr = (void *) (long) r3;

	return probe_kernel_read(dst, unsafe_ptr, size);
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
	.arg3_type	= ARG_ANYTHING,
};
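
/*
 * Illustrative sketch, not part of this file: from the BPF program side,
 * this helper copies 'size' bytes from an unsafe kernel address (e.g. a
 * pointer pulled out of pt_regs) into a buffer on the BPF stack
 * ('unsafe_kernel_ptr' below is a placeholder):
 *
 *	char buf[16];
 *
 *	bpf_probe_read(buf, sizeof(buf), unsafe_kernel_ptr);
 */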

static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

static const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

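/*
 * Illustrative note, not part of this file: a typical consumer of
 * bpf_ktime_get_ns() measures latency by storing a timestamp in a map on
 * function entry and computing the delta in the matching return probe.
 */
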
static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	default:
		return NULL;
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type)
{
	/* check bounds */
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;

	/* only read is allowed */
	if (type != BPF_READ)
		return false;

	/* disallow misaligned access */
	if (off % size != 0)
		return false;

	return true;
}
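
/*
 * Illustrative sketch, not part of this file: on x86_64, for example, an
 * 8-byte read at offsetof(struct pt_regs, ip) passes all three checks,
 * while any write into the context, an out-of-bounds offset, or a 4-byte
 * read at an offset not divisible by 4 makes the verifier reject the
 * program via this callback.
 */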

static struct bpf_verifier_ops kprobe_prog_ops = {
	.get_func_proto		= kprobe_prog_func_proto,
	.is_valid_access	= kprobe_prog_is_valid_access,
};

static struct bpf_prog_type_list kprobe_tl = {
	.ops	= &kprobe_prog_ops,
	.type	= BPF_PROG_TYPE_KPROBE,
};

static int __init register_kprobe_prog_ops(void)
{
	bpf_register_prog_type(&kprobe_tl);
	return 0;
}
late_initcall(register_kprobe_prog_ops);
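
/*
 * Illustrative sketch, not part of this file: with this type registered,
 * userspace loads a program via bpf(BPF_PROG_LOAD, ...) with
 * prog_type = BPF_PROG_TYPE_KPROBE and attaches it to a kprobe perf
 * event with ioctl(event_fd, PERF_EVENT_IOC_SET_BPF, prog_fd).
 */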