// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void *)(entry) + SIZEOF_TRACE_ENTRY(is_return))
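
/*
 * Layout: an entry probe records one address, vaddr[0] = probed
 * instruction pointer; a return probe records two, vaddr[0] = called
 * function and vaddr[1] = return site (see __uprobe_trace_func()).
 * The fetched argument data follows in both cases.
 */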

struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head		list;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			ref_ctr_offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}
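
/*
 * For example, a "$stack3" fetch argument ends up here as
 * get_user_stack_nth(regs, 3), reading the word three longs past the
 * user stack pointer (a sketch; the argument parsing itself lives in
 * trace_probe.c).
 */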

/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_user_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size);
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	}

	return ret;
}
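
/*
 * A sketch of the convention assumed above: the u32 at *dest packs
 * (max-)length << 16 | offset-from-event-entry, ftrace's usual
 * __data_loc layout; get_loc_len()/get_loc_data()/make_data_loc()
 * (from the trace_probe headers) encode and decode it.
 */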

/* Return the length of the string -- including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	len = strnlen_user(vaddr, MAX_STRING_SIZE);

	return (len > MAX_STRING_SIZE) ? 0 : len;
}

static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}
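
/*
 * translate_user_vaddr() backs the "@+OFFSET" (file offset) fetch
 * argument: the breakpoint hit address minus the probe's own file
 * offset gives the mapping's load base, so base + file_offset is the
 * argument's virtual address in the current process.
 */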

/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
		   void *base)
{
	unsigned long val;
	int ret = 0;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	/* 2nd stage: dereference memory if needed */
	while (code->op == FETCH_OP_DEREF) {
		ret = probe_user_read(&val, (void *)val + code->offset,
				      sizeof(val));
		if (ret)
			return ret;
		code++;
	}

	/* 3rd stage: store value to buffer */
	if (unlikely(!dest)) {
		if (code->op == FETCH_OP_ST_STRING)
			return fetch_store_strlen(val + code->offset);
		else
			return -EILSEQ;
	}

	switch (code->op) {
	case FETCH_OP_ST_RAW:
		fetch_store_raw(val, code, dest);
		break;
	case FETCH_OP_ST_MEM:
		probe_kernel_read(dest, (void *)val + code->offset, code->size);
		break;
	case FETCH_OP_ST_STRING:
		ret = fetch_store_string(val + code->offset, dest, base);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	/* 4th stage: modify stored value if needed */
	if (code->op == FETCH_OP_MOD_BF) {
		fetch_apply_bitfield(code, dest);
		code++;
	}

	return code->op == FETCH_OP_END ? ret : -EILSEQ;
}
NOKPROBE_SYMBOL(process_fetch_insn)
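
/*
 * An illustrative sketch of a compiled fetch program (the actual
 * encoding is produced in trace_probe.c): an argument such as
 * "+0(%ax):u32" would run as { FETCH_OP_REG(ax),
 * FETCH_OP_ST_MEM(offset 0, size 4), FETCH_OP_END }, while
 * "+8(+0(%ax)):string" inserts a FETCH_OP_DEREF between the register
 * fetch and the final FETCH_OP_ST_STRING.
 */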

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	path_put(&tu->path);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event: the caller must hold uprobe_lock */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	list_del(&tu->list);
	free_trace_uprobe(tu);
	return 0;
}
/*
 * A uprobe with multiple reference counters is not allowed: if the
 * inode and offset match, the reference counter offset *must* match
 * as well. There is one exception: if the user is replacing an old
 * trace_uprobe with a new one (same group/event), we allow the same
 * uprobe with a new reference counter, as long as the new one does
 * not conflict with any other existing ones.
 */
static struct trace_uprobe *find_old_trace_uprobe(struct trace_uprobe *new)
{
	struct trace_uprobe *tmp, *old = NULL;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	old = find_probe_event(trace_event_name(&new->tp.call),
				new->tp.call.class->system);

	list_for_each_entry(tmp, &uprobe_list, list) {
		if ((old ? old != tmp : true) &&
		    new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return ERR_PTR(-EINVAL);
		}
	}
	return old;
}
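
/*
 * Concretely (an illustrative scenario): with "p:ev1 FILE:0x100(0x200)"
 * installed, adding "p:ev2 FILE:0x100(0x300)" is rejected, while
 * re-adding "p:ev1 FILE:0x100(0x300)" replaces the old probe and is
 * allowed as long as no third event still references 0x200.
 */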

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_old_trace_uprobe(tu);
	if (IS_ERR(old_tu)) {
		ret = PTR_ERR(old_tu);
		goto end;
	}

	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
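/*
 * For example, from the tracefs directory (paths and offsets are
 * illustrative only):
 *
 *   echo 'p:myprobe /bin/bash:0x4245c0 %ip %ax' > uprobe_events
 *   echo 'r:myretprobe /bin/bash:0x4245c0 $retval' >> uprobe_events
 *   echo '-:myprobe' >> uprobe_events
 */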
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	char *arg, *event, *group, *filename, *rctr, *rctr_end;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_delete, is_return;
	int i, ret;

	ret = 0;
	is_delete = false;
	is_return = false;
	event = NULL;
	group = NULL;
	ref_ctr_offset = 0;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must start with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');

		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		int ret;

		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);

		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(argv[1], ':');
	if (!arg)
		return -EINVAL;

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		return ret;

	if (!d_is_reg(path.dentry)) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (rctr > rctr_end || *(rctr_end + 1) != 0) {
			ret = -EINVAL;
			pr_info("Invalid reference counter offset.\n");
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			pr_info("Invalid reference counter offset.\n");
			goto fail_address_parse;
		}
	}
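
	/*
	 * e.g. "p:sdt_ev /usr/lib/libfoo.so:0x10(0x2000)" (illustrative)
	 * names 0x2000 as the reference counter offset -- typically an
	 * SDT semaphore -- which uprobes increments while the probe is
	 * installed.
	 */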

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;
	tu->filename = kstrdup(filename, GFP_KERNEL);

	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		/* Increment count for freeing args in error case */
		tu->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
						 is_return, false);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	path_put(&path);

	pr_info("Failed to parse address or file.\n");

	return ret;
}

static int cleanup_all_probes(void)
{
	struct trace_uprobe *tu;
	int ret = 0;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		ret = unregister_trace_uprobe(tu);
		if (ret)
			break;
	}
	mutex_unlock(&uprobe_lock);
	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
			trace_event_name(&tu->tp.call), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}
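
/*
 * A line produced above looks like (illustrative values, 64-bit):
 *
 *   p:uprobes/myprobe /bin/bash:0x00000000004245c0(0x10200) arg1=%ax
 */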

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = cleanup_all_probes();
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, " %s %-44s %15lu\n", tu->filename,
			trace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = &tu->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}
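
/*
 * Rendered trace lines look like (illustrative values):
 * "myprobe: (0x4245c0) arg1=1" for an entry probe, and
 * "myretprobe: (0x41b1e0 <- 0x4245c0) arg1=0" for a return probe
 * (return site <- probed function).
 */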

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

static int
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		if (tu->tp.flags & TP_FLAG_PROFILE)
			return -EINTR;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else {
		if (tu->tp.flags & TP_FLAG_TRACE)
			return -EINTR;

		tu->tp.flags |= TP_FLAG_PROFILE;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);
	if (tu->ref_ctr_offset) {
		ret = uprobe_register_refctr(tu->inode, tu->offset,
				tu->ref_ctr_offset, &tu->consumer);
	} else {
		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	}

	if (ret)
		goto err_buffer;

	return 0;

err_buffer:
	uprobe_buffer_disable();

err_flags:
	if (file) {
		list_del(&link->list);
		kfree(link);
		tu->tp.flags &= ~TP_FLAG_TRACE;
	} else {
		tu->tp.flags &= ~TP_FLAG_PROFILE;
	}
	return ret;
}

static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_rcu();
		kfree(link);

		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->inode = NULL;
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}
| 1053 | |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1054 | #ifdef CONFIG_PERF_EVENTS |
Oleg Nesterov | 31ba334 | 2013-02-04 17:11:58 +0100 | [diff] [blame] | 1055 | static bool |
| 1056 | __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm) |
| 1057 | { |
| 1058 | struct perf_event *event; |
| 1059 | |
| 1060 | if (filter->nr_systemwide) |
| 1061 | return true; |
| 1062 | |
| 1063 | list_for_each_entry(event, &filter->perf_events, hw.tp_list) { |
Peter Zijlstra | 50f16a8 | 2015-03-05 22:10:19 +0100 | [diff] [blame] | 1064 | if (event->hw.target->mm == mm) |
Oleg Nesterov | 31ba334 | 2013-02-04 17:11:58 +0100 | [diff] [blame] | 1065 | return true; |
| 1066 | } |
| 1067 | |
| 1068 | return false; |
| 1069 | } |
| 1070 | |
Oleg Nesterov | b2fe8ba | 2013-02-04 19:05:43 +0100 | [diff] [blame] | 1071 | static inline bool |
| 1072 | uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event) |
| 1073 | { |
Peter Zijlstra | 50f16a8 | 2015-03-05 22:10:19 +0100 | [diff] [blame] | 1074 | return __uprobe_perf_filter(&tu->filter, event->hw.target->mm); |
Oleg Nesterov | b2fe8ba | 2013-02-04 19:05:43 +0100 | [diff] [blame] | 1075 | } |
| 1076 | |
Oleg Nesterov | ce5f36a | 2014-04-24 13:26:01 +0200 | [diff] [blame] | 1077 | static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event) |
| 1078 | { |
| 1079 | bool done; |
| 1080 | |
| 1081 | write_lock(&tu->filter.rwlock); |
Peter Zijlstra | 50f16a8 | 2015-03-05 22:10:19 +0100 | [diff] [blame] | 1082 | if (event->hw.target) { |
Oleg Nesterov | ce5f36a | 2014-04-24 13:26:01 +0200 | [diff] [blame] | 1083 | list_del(&event->hw.tp_list); |
| 1084 | done = tu->filter.nr_systemwide || |
Peter Zijlstra | 50f16a8 | 2015-03-05 22:10:19 +0100 | [diff] [blame] | 1085 | (event->hw.target->flags & PF_EXITING) || |
Oleg Nesterov | ce5f36a | 2014-04-24 13:26:01 +0200 | [diff] [blame] | 1086 | uprobe_filter_event(tu, event); |
| 1087 | } else { |
| 1088 | tu->filter.nr_systemwide--; |
| 1089 | done = tu->filter.nr_systemwide; |
| 1090 | } |
| 1091 | write_unlock(&tu->filter.rwlock); |
| 1092 | |
| 1093 | if (!done) |
Oleg Nesterov | 927d687 | 2014-04-24 13:33:31 +0200 | [diff] [blame] | 1094 | return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false); |
Oleg Nesterov | ce5f36a | 2014-04-24 13:26:01 +0200 | [diff] [blame] | 1095 | |
| 1096 | return 0; |
| 1097 | } |
| 1098 | |
Oleg Nesterov | 736288b | 2013-02-03 20:58:35 +0100 | [diff] [blame] | 1099 | static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event) |
| 1100 | { |
Oleg Nesterov | b2fe8ba | 2013-02-04 19:05:43 +0100 | [diff] [blame] | 1101 | bool done; |
Oleg Nesterov | 927d687 | 2014-04-24 13:33:31 +0200 | [diff] [blame] | 1102 | int err; |
Oleg Nesterov | b2fe8ba | 2013-02-04 19:05:43 +0100 | [diff] [blame] | 1103 | |
Oleg Nesterov | 736288b | 2013-02-03 20:58:35 +0100 | [diff] [blame] | 1104 | write_lock(&tu->filter.rwlock); |
Peter Zijlstra | 50f16a8 | 2015-03-05 22:10:19 +0100 | [diff] [blame] | 1105 | if (event->hw.target) { |
Oleg Nesterov | b2fe8ba | 2013-02-04 19:05:43 +0100 | [diff] [blame] | 1106 | /*
| 1107 | * event->parent != NULL means this event comes from copy_process(),
| 1108 | * so we can avoid uprobe_apply(): current->mm must already be probed,
| 1109 | * and dup_mmap() preserves the breakpoints already installed there.
| 1110 | *
| 1111 | * attr.enable_on_exec means that exec/mmap will install the
| 1112 | * breakpoints we need.
| 1113 | */
| 1114 | done = tu->filter.nr_systemwide || |
| 1115 | event->parent || event->attr.enable_on_exec || |
| 1116 | uprobe_filter_event(tu, event); |
Oleg Nesterov | 736288b | 2013-02-03 20:58:35 +0100 | [diff] [blame] | 1117 | list_add(&event->hw.tp_list, &tu->filter.perf_events); |
Oleg Nesterov | b2fe8ba | 2013-02-04 19:05:43 +0100 | [diff] [blame] | 1118 | } else { |
| 1119 | done = tu->filter.nr_systemwide; |
Oleg Nesterov | 736288b | 2013-02-03 20:58:35 +0100 | [diff] [blame] | 1120 | tu->filter.nr_systemwide++; |
Oleg Nesterov | b2fe8ba | 2013-02-04 19:05:43 +0100 | [diff] [blame] | 1121 | } |
Oleg Nesterov | 736288b | 2013-02-03 20:58:35 +0100 | [diff] [blame] | 1122 | write_unlock(&tu->filter.rwlock); |
| 1123 | |
Oleg Nesterov | 927d687 | 2014-04-24 13:33:31 +0200 | [diff] [blame] | 1124 | err = 0; |
| 1125 | if (!done) { |
| 1126 | err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true); |
| 1127 | if (err) |
| 1128 | uprobe_perf_close(tu, event); |
| 1129 | } |
| 1130 | return err; |
Oleg Nesterov | 736288b | 2013-02-03 20:58:35 +0100 | [diff] [blame] | 1131 | } |
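/*
 * open/close keep the filter in sync with the set of live perf events.
 * A minimal lifetime sketch, assuming a per-task (non-systemwide) event:
 *
 *   uprobe_perf_open(tu, event);  // event joins filter.perf_events;
 *                                 // uprobe_apply(..., true) installs
 *                                 // breakpoints if its mm was not
 *                                 // already covered
 *   ...
 *   uprobe_perf_close(tu, event); // event leaves the list;
 *                                 // uprobe_apply(..., false) removes
 *                                 // breakpoints no remaining event needs
 *
 * The "done" flag short-circuits uprobe_apply() whenever the update
 * cannot have changed which mms are covered.
 */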
| 1132 | |
Oleg Nesterov | 31ba334 | 2013-02-04 17:11:58 +0100 | [diff] [blame] | 1133 | static bool uprobe_perf_filter(struct uprobe_consumer *uc, |
| 1134 | enum uprobe_filter_ctx ctx, struct mm_struct *mm) |
| 1135 | { |
| 1136 | struct trace_uprobe *tu; |
| 1137 | int ret; |
| 1138 | |
| 1139 | tu = container_of(uc, struct trace_uprobe, consumer); |
| 1140 | read_lock(&tu->filter.rwlock); |
| 1141 | ret = __uprobe_perf_filter(&tu->filter, mm); |
| 1142 | read_unlock(&tu->filter.rwlock); |
| 1143 | |
| 1144 | return ret; |
| 1145 | } |
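/*
 * This is the uprobe_consumer->filter callback: probe_event_enable()
 * installs it for the perf case (see trace_uprobe_register() below),
 * and the uprobe core consults it to decide whether a given mm should
 * get the breakpoint at all.
 */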
| 1146 | |
Namhyung Kim | a43b970 | 2014-01-17 17:08:36 +0900 | [diff] [blame] | 1147 | static void __uprobe_perf_func(struct trace_uprobe *tu, |
Namhyung Kim | dd9fa55 | 2014-01-17 17:08:37 +0900 | [diff] [blame] | 1148 | unsigned long func, struct pt_regs *regs, |
| 1149 | struct uprobe_cpu_buffer *ucb, int dsize) |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1150 | { |
Steven Rostedt (Red Hat) | 2425bcb | 2015-05-05 11:45:27 -0400 | [diff] [blame] | 1151 | struct trace_event_call *call = &tu->tp.call; |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1152 | struct uprobe_trace_entry_head *entry; |
| 1153 | struct hlist_head *head; |
Oleg Nesterov | 457d177 | 2013-03-29 18:26:51 +0100 | [diff] [blame] | 1154 | void *data; |
Namhyung Kim | dd9fa55 | 2014-01-17 17:08:37 +0900 | [diff] [blame] | 1155 | int size, esize; |
Namhyung Kim | dcad1a2 | 2013-07-03 16:40:28 +0900 | [diff] [blame] | 1156 | int rctx; |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1157 | |
Yonghong Song | e87c6bc38 | 2017-10-23 23:53:08 -0700 | [diff] [blame] | 1158 | if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs)) |
Wang Nan | 04a22fa | 2015-07-01 02:13:50 +0000 | [diff] [blame] | 1159 | return; |
| 1160 | |
Namhyung Kim | dcad1a2 | 2013-07-03 16:40:28 +0900 | [diff] [blame] | 1161 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); |
| 1162 | |
Namhyung Kim | dcad1a2 | 2013-07-03 16:40:28 +0900 | [diff] [blame] | 1163 | size = esize + tu->tp.size + dsize; |
| 1164 | size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32); |
| 1165 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) |
| 1166 | return; |
| 1167 | |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1168 | preempt_disable(); |
Oleg Nesterov | 515619f | 2013-04-13 15:36:49 +0200 | [diff] [blame] | 1169 | head = this_cpu_ptr(call->perf_events); |
| 1170 | if (hlist_empty(head)) |
| 1171 | goto out; |
| 1172 | |
Alexei Starovoitov | 1e1dcd9 | 2016-04-06 18:43:24 -0700 | [diff] [blame] | 1173 | entry = perf_trace_buf_alloc(size, NULL, &rctx); |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1174 | if (!entry) |
| 1175 | goto out; |
| 1176 | |
Oleg Nesterov | 393a736 | 2013-03-30 18:46:22 +0100 | [diff] [blame] | 1177 | if (is_ret_probe(tu)) { |
| 1178 | entry->vaddr[0] = func; |
Oleg Nesterov | 32520b2 | 2013-04-10 16:25:49 +0200 | [diff] [blame] | 1179 | entry->vaddr[1] = instruction_pointer(regs); |
Oleg Nesterov | 393a736 | 2013-03-30 18:46:22 +0100 | [diff] [blame] | 1180 | data = DATAOF_TRACE_ENTRY(entry, true); |
| 1181 | } else { |
Oleg Nesterov | 32520b2 | 2013-04-10 16:25:49 +0200 | [diff] [blame] | 1182 | entry->vaddr[0] = instruction_pointer(regs); |
Oleg Nesterov | 393a736 | 2013-03-30 18:46:22 +0100 | [diff] [blame] | 1183 | data = DATAOF_TRACE_ENTRY(entry, false); |
| 1184 | } |
| 1185 | |
Namhyung Kim | dcad1a2 | 2013-07-03 16:40:28 +0900 | [diff] [blame] | 1186 | memcpy(data, ucb->buf, tu->tp.size + dsize); |
Namhyung Kim | 14577c3 | 2013-07-03 15:42:53 +0900 | [diff] [blame] | 1187 | |
Namhyung Kim | dcad1a2 | 2013-07-03 16:40:28 +0900 | [diff] [blame] | 1188 | if (size - esize > tu->tp.size + dsize) { |
| 1189 | int len = tu->tp.size + dsize; |
| 1190 | |
| 1191 | memset(data + len, 0, size - esize - len); |
Namhyung Kim | 14577c3 | 2013-07-03 15:42:53 +0900 | [diff] [blame] | 1192 | } |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1193 | |
Alexei Starovoitov | 1e1dcd9 | 2016-04-06 18:43:24 -0700 | [diff] [blame] | 1194 | perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs, |
Peter Zijlstra | 8fd0fbb | 2017-10-11 09:45:29 +0200 | [diff] [blame] | 1195 | head, NULL); |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1196 | out: |
| 1197 | preempt_enable(); |
Oleg Nesterov | a51cc60 | 2013-03-30 18:02:12 +0100 | [diff] [blame] | 1198 | } |
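/*
 * The size computation above rounds the payload so that, together with
 * the u32 size header perf prepends, each record stays u64-aligned.
 * A worked example with esize + tp.size + dsize = 30:
 *
 *   size = ALIGN(30 + 4, 8) - 4 = 40 - 4 = 36
 *
 * i.e. 36 payload bytes plus the 4-byte header give a 40-byte record,
 * and the memset() zeroes the padding bytes past the real data.
 */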
| 1199 | |
| 1200 | /* uprobe profile handler */ |
Namhyung Kim | dd9fa55 | 2014-01-17 17:08:37 +0900 | [diff] [blame] | 1201 | static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs, |
| 1202 | struct uprobe_cpu_buffer *ucb, int dsize) |
Oleg Nesterov | a51cc60 | 2013-03-30 18:02:12 +0100 | [diff] [blame] | 1203 | { |
| 1204 | if (!uprobe_perf_filter(&tu->consumer, 0, current->mm)) |
| 1205 | return UPROBE_HANDLER_REMOVE; |
| 1206 | |
Oleg Nesterov | 393a736 | 2013-03-30 18:46:22 +0100 | [diff] [blame] | 1207 | if (!is_ret_probe(tu)) |
Namhyung Kim | dd9fa55 | 2014-01-17 17:08:37 +0900 | [diff] [blame] | 1208 | __uprobe_perf_func(tu, 0, regs, ucb, dsize); |
Oleg Nesterov | f42d24a | 2013-02-04 17:48:34 +0100 | [diff] [blame] | 1209 | return 0; |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1210 | } |
Oleg Nesterov | c1ae5c7 | 2013-03-30 18:25:23 +0100 | [diff] [blame] | 1211 | |
| 1212 | static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func, |
Namhyung Kim | dd9fa55 | 2014-01-17 17:08:37 +0900 | [diff] [blame] | 1213 | struct pt_regs *regs, |
| 1214 | struct uprobe_cpu_buffer *ucb, int dsize) |
Oleg Nesterov | c1ae5c7 | 2013-03-30 18:25:23 +0100 | [diff] [blame] | 1215 | { |
Namhyung Kim | dd9fa55 | 2014-01-17 17:08:37 +0900 | [diff] [blame] | 1216 | __uprobe_perf_func(tu, func, regs, ucb, dsize); |
Oleg Nesterov | c1ae5c7 | 2013-03-30 18:25:23 +0100 | [diff] [blame] | 1217 | } |
Yonghong Song | 41bdc4b | 2018-05-24 11:21:09 -0700 | [diff] [blame] | 1218 | |
| 1219 | int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type, |
| 1220 | const char **filename, u64 *probe_offset, |
| 1221 | bool perf_type_tracepoint) |
| 1222 | { |
| 1223 | const char *pevent = trace_event_name(event->tp_event); |
| 1224 | const char *group = event->tp_event->class->system; |
| 1225 | struct trace_uprobe *tu; |
| 1226 | |
| 1227 | if (perf_type_tracepoint) |
| 1228 | tu = find_probe_event(pevent, group); |
| 1229 | else |
| 1230 | tu = event->tp_event->data; |
| 1231 | if (!tu) |
| 1232 | return -EINVAL; |
| 1233 | |
| 1234 | *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE |
| 1235 | : BPF_FD_TYPE_UPROBE; |
| 1236 | *filename = tu->filename; |
| 1237 | *probe_offset = tu->offset; |
| 1238 | return 0; |
| 1239 | } |
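/*
 * This is the uprobe half of the BPF_TASK_FD_QUERY introspection path:
 * given a perf event fd, user space can recover the file, offset and
 * probe type the event was attached to. A kprobe counterpart lives in
 * trace_kprobe.c.
 */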
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1240 | #endif /* CONFIG_PERF_EVENTS */ |
| 1241 | |
zhangwei(Jovi) | 70ed91c | 2014-01-17 17:08:38 +0900 | [diff] [blame] | 1242 | static int |
Steven Rostedt (Red Hat) | 2425bcb | 2015-05-05 11:45:27 -0400 | [diff] [blame] | 1243 | trace_uprobe_register(struct trace_event_call *event, enum trace_reg type, |
zhangwei(Jovi) | 70ed91c | 2014-01-17 17:08:38 +0900 | [diff] [blame] | 1244 | void *data) |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1245 | { |
Oleg Nesterov | 457d177 | 2013-03-29 18:26:51 +0100 | [diff] [blame] | 1246 | struct trace_uprobe *tu = event->data; |
Steven Rostedt (Red Hat) | 7f1d2f8 | 2015-05-05 10:09:53 -0400 | [diff] [blame] | 1247 | struct trace_event_file *file = data; |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1248 | |
| 1249 | switch (type) { |
| 1250 | case TRACE_REG_REGISTER: |
zhangwei(Jovi) | 70ed91c | 2014-01-17 17:08:38 +0900 | [diff] [blame] | 1251 | return probe_event_enable(tu, file, NULL); |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1252 | |
| 1253 | case TRACE_REG_UNREGISTER: |
zhangwei(Jovi) | 70ed91c | 2014-01-17 17:08:38 +0900 | [diff] [blame] | 1254 | probe_event_disable(tu, file); |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1255 | return 0; |
| 1256 | |
| 1257 | #ifdef CONFIG_PERF_EVENTS |
| 1258 | case TRACE_REG_PERF_REGISTER: |
zhangwei(Jovi) | 70ed91c | 2014-01-17 17:08:38 +0900 | [diff] [blame] | 1259 | return probe_event_enable(tu, NULL, uprobe_perf_filter); |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1260 | |
| 1261 | case TRACE_REG_PERF_UNREGISTER: |
zhangwei(Jovi) | 70ed91c | 2014-01-17 17:08:38 +0900 | [diff] [blame] | 1262 | probe_event_disable(tu, NULL); |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1263 | return 0; |
Oleg Nesterov | 736288b | 2013-02-03 20:58:35 +0100 | [diff] [blame] | 1264 | |
| 1265 | case TRACE_REG_PERF_OPEN: |
| 1266 | return uprobe_perf_open(tu, data); |
| 1267 | |
| 1268 | case TRACE_REG_PERF_CLOSE: |
| 1269 | return uprobe_perf_close(tu, data); |
| 1270 | |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1271 | #endif |
| 1272 | default: |
| 1273 | return 0; |
| 1274 | } |
| 1275 | return 0; |
| 1276 | } |
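/*
 * Note that @data is overloaded: for TRACE_REG_(UN)REGISTER it is the
 * struct trace_event_file of the ftrace instance, while for the perf
 * OPEN/CLOSE callbacks it is the struct perf_event being attached.
 */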
| 1277 | |
| 1278 | static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) |
| 1279 | { |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1280 | struct trace_uprobe *tu; |
Namhyung Kim | b7e0bf3 | 2013-11-25 13:42:47 +0900 | [diff] [blame] | 1281 | struct uprobe_dispatch_data udd; |
Namhyung Kim | dd9fa55 | 2014-01-17 17:08:37 +0900 | [diff] [blame] | 1282 | struct uprobe_cpu_buffer *ucb; |
| 1283 | int dsize, esize; |
Oleg Nesterov | f42d24a | 2013-02-04 17:48:34 +0100 | [diff] [blame] | 1284 | int ret = 0; |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1285 | 
Oleg Nesterov | a932b73 | 2013-01-31 19:47:23 +0100 | [diff] [blame] | 1287 | tu = container_of(con, struct trace_uprobe, consumer); |
Oleg Nesterov | 1b47aef | 2013-01-31 19:55:27 +0100 | [diff] [blame] | 1288 | tu->nhit++; |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1289 | |
Namhyung Kim | b7e0bf3 | 2013-11-25 13:42:47 +0900 | [diff] [blame] | 1290 | udd.tu = tu; |
| 1291 | udd.bp_addr = instruction_pointer(regs); |
| 1292 | |
| 1293 | current->utask->vaddr = (unsigned long) &udd; |
| 1294 | |
Namhyung Kim | dd9fa55 | 2014-01-17 17:08:37 +0900 | [diff] [blame] | 1295 | if (WARN_ON_ONCE(!uprobe_cpu_buffer)) |
| 1296 | return 0; |
| 1297 | |
| 1298 | dsize = __get_data_size(&tu->tp, regs); |
| 1299 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); |
| 1300 | |
| 1301 | ucb = uprobe_buffer_get(); |
Masami Hiramatsu | 9178412 | 2018-04-25 21:19:01 +0900 | [diff] [blame] | 1302 | store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize); |
Namhyung Kim | dd9fa55 | 2014-01-17 17:08:37 +0900 | [diff] [blame] | 1303 | |
Namhyung Kim | 14577c3 | 2013-07-03 15:42:53 +0900 | [diff] [blame] | 1304 | if (tu->tp.flags & TP_FLAG_TRACE) |
Namhyung Kim | dd9fa55 | 2014-01-17 17:08:37 +0900 | [diff] [blame] | 1305 | ret |= uprobe_trace_func(tu, regs, ucb, dsize); |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1306 | |
| 1307 | #ifdef CONFIG_PERF_EVENTS |
Namhyung Kim | 14577c3 | 2013-07-03 15:42:53 +0900 | [diff] [blame] | 1308 | if (tu->tp.flags & TP_FLAG_PROFILE) |
Namhyung Kim | dd9fa55 | 2014-01-17 17:08:37 +0900 | [diff] [blame] | 1309 | ret |= uprobe_perf_func(tu, regs, ucb, dsize); |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1310 | #endif |
Namhyung Kim | dd9fa55 | 2014-01-17 17:08:37 +0900 | [diff] [blame] | 1311 | uprobe_buffer_put(ucb); |
Oleg Nesterov | f42d24a | 2013-02-04 17:48:34 +0100 | [diff] [blame] | 1312 | return ret; |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1313 | } |
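/*
 * A sketch of the hot path: the uprobe core invokes this consumer once
 * per hit; the arguments are fetched exactly once into a per-cpu buffer
 * and then fanned out to ftrace and/or perf according to the TP_FLAG_*
 * bits. A UPROBE_HANDLER_REMOVE return (from the perf branch) asks the
 * core to remove the breakpoint from the current mm.
 */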
| 1314 | |
Oleg Nesterov | c1ae5c7 | 2013-03-30 18:25:23 +0100 | [diff] [blame] | 1315 | static int uretprobe_dispatcher(struct uprobe_consumer *con, |
| 1316 | unsigned long func, struct pt_regs *regs) |
| 1317 | { |
| 1318 | struct trace_uprobe *tu; |
Namhyung Kim | b7e0bf3 | 2013-11-25 13:42:47 +0900 | [diff] [blame] | 1319 | struct uprobe_dispatch_data udd; |
Namhyung Kim | dd9fa55 | 2014-01-17 17:08:37 +0900 | [diff] [blame] | 1320 | struct uprobe_cpu_buffer *ucb; |
| 1321 | int dsize, esize; |
Oleg Nesterov | c1ae5c7 | 2013-03-30 18:25:23 +0100 | [diff] [blame] | 1322 | |
| 1323 | tu = container_of(con, struct trace_uprobe, consumer); |
| 1324 | |
Namhyung Kim | b7e0bf3 | 2013-11-25 13:42:47 +0900 | [diff] [blame] | 1325 | udd.tu = tu; |
| 1326 | udd.bp_addr = func; |
| 1327 | |
| 1328 | current->utask->vaddr = (unsigned long) &udd; |
| 1329 | |
Namhyung Kim | dd9fa55 | 2014-01-17 17:08:37 +0900 | [diff] [blame] | 1330 | if (WARN_ON_ONCE(!uprobe_cpu_buffer)) |
| 1331 | return 0; |
| 1332 | |
| 1333 | dsize = __get_data_size(&tu->tp, regs); |
| 1334 | esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); |
| 1335 | |
| 1336 | ucb = uprobe_buffer_get(); |
Masami Hiramatsu | 9178412 | 2018-04-25 21:19:01 +0900 | [diff] [blame] | 1337 | store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize); |
Namhyung Kim | dd9fa55 | 2014-01-17 17:08:37 +0900 | [diff] [blame] | 1338 | |
Namhyung Kim | 14577c3 | 2013-07-03 15:42:53 +0900 | [diff] [blame] | 1339 | if (tu->tp.flags & TP_FLAG_TRACE) |
Namhyung Kim | dd9fa55 | 2014-01-17 17:08:37 +0900 | [diff] [blame] | 1340 | uretprobe_trace_func(tu, func, regs, ucb, dsize); |
Oleg Nesterov | c1ae5c7 | 2013-03-30 18:25:23 +0100 | [diff] [blame] | 1341 | |
| 1342 | #ifdef CONFIG_PERF_EVENTS |
Namhyung Kim | 14577c3 | 2013-07-03 15:42:53 +0900 | [diff] [blame] | 1343 | if (tu->tp.flags & TP_FLAG_PROFILE) |
Namhyung Kim | dd9fa55 | 2014-01-17 17:08:37 +0900 | [diff] [blame] | 1344 | uretprobe_perf_func(tu, func, regs, ucb, dsize); |
Oleg Nesterov | c1ae5c7 | 2013-03-30 18:25:23 +0100 | [diff] [blame] | 1345 | #endif |
Namhyung Kim | dd9fa55 | 2014-01-17 17:08:37 +0900 | [diff] [blame] | 1346 | uprobe_buffer_put(ucb); |
Oleg Nesterov | c1ae5c7 | 2013-03-30 18:25:23 +0100 | [diff] [blame] | 1347 | return 0; |
| 1348 | } |
| 1349 | |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1350 | static struct trace_event_functions uprobe_funcs = { |
| 1351 | .trace = print_uprobe_event |
| 1352 | }; |
| 1353 | |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 1354 | static inline void init_trace_event_call(struct trace_uprobe *tu, |
| 1355 | struct trace_event_call *call) |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1356 | { |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1357 | INIT_LIST_HEAD(&call->class->fields); |
| 1358 | call->event.funcs = &uprobe_funcs; |
| 1359 | call->class->define_fields = uprobe_event_define_fields; |
| 1360 | |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 1361 | call->flags = TRACE_EVENT_FL_UPROBE; |
| 1362 | call->class->reg = trace_uprobe_register; |
| 1363 | call->data = tu; |
| 1364 | } |
| 1365 | |
| 1366 | static int register_uprobe_event(struct trace_uprobe *tu) |
| 1367 | { |
| 1368 | struct trace_event_call *call = &tu->tp.call; |
| 1369 | int ret = 0; |
| 1370 | |
| 1371 | init_trace_event_call(tu, call); |
| 1372 | |
Masami Hiramatsu | 0a46c85 | 2018-04-25 21:19:30 +0900 | [diff] [blame^] | 1373 | if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1374 | return -ENOMEM; |
| 1375 | |
Steven Rostedt (Red Hat) | 9023c93 | 2015-05-05 09:39:12 -0400 | [diff] [blame] | 1376 | ret = register_trace_event(&call->event); |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1377 | if (!ret) { |
| 1378 | kfree(call->print_fmt); |
| 1379 | return -ENODEV; |
| 1380 | } |
Oleg Nesterov | ede392a | 2014-07-15 20:48:24 +0200 | [diff] [blame] | 1381 | |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1382 | ret = trace_add_event_call(call); |
| 1383 | |
| 1384 | if (ret) { |
Mathieu Desnoyers | de7b297 | 2014-04-08 17:26:21 -0400 | [diff] [blame] | 1385 | pr_info("Failed to register uprobe event: %s\n", |
Steven Rostedt (Red Hat) | 687fcc4 | 2015-05-13 14:20:14 -0400 | [diff] [blame] | 1386 | trace_event_name(call)); |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1387 | kfree(call->print_fmt); |
Steven Rostedt (Red Hat) | 9023c93 | 2015-05-05 09:39:12 -0400 | [diff] [blame] | 1388 | unregister_trace_event(&call->event); |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1389 | } |
| 1390 | |
| 1391 | return ret; |
| 1392 | } |
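/*
 * Registration is a two-step dance: register_trace_event() claims an
 * event type id and hooks up the output callbacks, then
 * trace_add_event_call() exposes the event in tracefs. A failure in
 * the second step unwinds the first.
 */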
| 1393 | |
Steven Rostedt (Red Hat) | c6c2401 | 2013-07-03 23:33:51 -0400 | [diff] [blame] | 1394 | static int unregister_uprobe_event(struct trace_uprobe *tu) |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1395 | { |
Steven Rostedt (Red Hat) | c6c2401 | 2013-07-03 23:33:51 -0400 | [diff] [blame] | 1396 | int ret; |
| 1397 | |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1398 | /* tu->event is unregistered in trace_remove_event_call() */ |
Namhyung Kim | 14577c3 | 2013-07-03 15:42:53 +0900 | [diff] [blame] | 1399 | ret = trace_remove_event_call(&tu->tp.call); |
Steven Rostedt (Red Hat) | c6c2401 | 2013-07-03 23:33:51 -0400 | [diff] [blame] | 1400 | if (ret) |
| 1401 | return ret; |
Namhyung Kim | 14577c3 | 2013-07-03 15:42:53 +0900 | [diff] [blame] | 1402 | kfree(tu->tp.call.print_fmt); |
| 1403 | tu->tp.call.print_fmt = NULL; |
Steven Rostedt (Red Hat) | c6c2401 | 2013-07-03 23:33:51 -0400 | [diff] [blame] | 1404 | return 0; |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1405 | } |
| 1406 | |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 1407 | #ifdef CONFIG_PERF_EVENTS |
| 1408 | struct trace_event_call * |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 1409 | create_local_trace_uprobe(char *name, unsigned long offs, |
| 1410 | unsigned long ref_ctr_offset, bool is_return) |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 1411 | { |
| 1412 | struct trace_uprobe *tu; |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 1413 | struct path path; |
| 1414 | int ret; |
| 1415 | |
| 1416 | ret = kern_path(name, LOOKUP_FOLLOW, &path); |
| 1417 | if (ret) |
| 1418 | return ERR_PTR(ret); |
| 1419 | |
Song Liu | 0c92c7a | 2018-04-23 10:21:34 -0700 | [diff] [blame] | 1420 | if (!d_is_reg(path.dentry)) { |
| 1421 | path_put(&path); |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 1422 | return ERR_PTR(-EINVAL); |
| 1423 | } |
| 1424 | |
| 1425 | /*
| 1426 | * local trace_uprobes are not added to probe_list, so they are never
| 1427 | * searched in find_probe_event(). Therefore, there is no concern of
| 1428 | * duplicated name "DUMMY_EVENT" here.
| 1429 | */
| 1430 | tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0, |
| 1431 | is_return); |
| 1432 | |
| 1433 | if (IS_ERR(tu)) { |
| 1434 | pr_info("Failed to allocate trace_uprobe (%d)\n",
| 1435 | (int)PTR_ERR(tu));
Song Liu | 0c92c7a | 2018-04-23 10:21:34 -0700 | [diff] [blame] | 1436 | path_put(&path); |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 1437 | return ERR_CAST(tu); |
| 1438 | } |
| 1439 | |
| 1440 | tu->offset = offs; |
Song Liu | 0c92c7a | 2018-04-23 10:21:34 -0700 | [diff] [blame] | 1441 | tu->path = path; |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 1442 | tu->ref_ctr_offset = ref_ctr_offset; |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 1443 | tu->filename = kstrdup(name, GFP_KERNEL); |
| 1444 | init_trace_event_call(tu, &tu->tp.call); |
| 1445 | |
Masami Hiramatsu | 0a46c85 | 2018-04-25 21:19:30 +0900 | [diff] [blame^] | 1446 | if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) { |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 1447 | ret = -ENOMEM; |
| 1448 | goto error; |
| 1449 | } |
| 1450 | |
| 1451 | return &tu->tp.call; |
| 1452 | error: |
| 1453 | free_trace_uprobe(tu); |
| 1454 | return ERR_PTR(ret); |
| 1455 | } |
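/*
 * A minimal usage sketch (hypothetical values; at the time of writing
 * the in-tree caller is perf_uprobe_init() on the perf_event_open()
 * path):
 *
 *   struct trace_event_call *call;
 *
 *   call = create_local_trace_uprobe("/bin/bash", 0x4245c0, 0, false);
 *   if (IS_ERR(call))
 *           return PTR_ERR(call);
 *   ...
 *   destroy_local_trace_uprobe(call);
 */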
| 1456 | |
| 1457 | void destroy_local_trace_uprobe(struct trace_event_call *event_call) |
| 1458 | { |
| 1459 | struct trace_uprobe *tu; |
| 1460 | |
| 1461 | tu = container_of(event_call, struct trace_uprobe, tp.call); |
| 1462 | |
| 1463 | kfree(tu->tp.call.print_fmt); |
| 1464 | tu->tp.call.print_fmt = NULL; |
| 1465 | |
| 1466 | free_trace_uprobe(tu); |
| 1467 | } |
| 1468 | #endif /* CONFIG_PERF_EVENTS */ |
| 1469 | |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1470 | /* Make a trace interface for controlling probe points */
| 1471 | static __init int init_uprobe_trace(void) |
| 1472 | { |
| 1473 | struct dentry *d_tracer; |
| 1474 | |
| 1475 | d_tracer = tracing_init_dentry(); |
Steven Rostedt (Red Hat) | 14a5ae4 | 2015-01-20 11:14:16 -0500 | [diff] [blame] | 1476 | if (IS_ERR(d_tracer)) |
Srikar Dronamraju | f3f096c | 2012-04-11 16:00:43 +0530 | [diff] [blame] | 1477 | return 0; |
| 1478 | |
| 1479 | trace_create_file("uprobe_events", 0644, d_tracer, |
| 1480 | NULL, &uprobe_events_ops); |
| 1481 | /* Profile interface */ |
| 1482 | trace_create_file("uprobe_profile", 0444, d_tracer, |
| 1483 | NULL, &uprobe_profile_ops); |
| 1484 | return 0; |
| 1485 | } |
| 1486 | |
| 1487 | fs_initcall(init_uprobe_trace); |
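/*
 * With the initcall in place, probes can be driven from user space via
 * tracefs; an illustrative session (path and offset are made up):
 *
 *   # echo 'p:bash_evt /bin/bash:0x4245c0' >> .../tracing/uprobe_events
 *   # echo 1 > .../tracing/events/uprobes/bash_evt/enable
 *   # cat .../tracing/trace
 *   # cat .../tracing/uprobe_profile
 */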