// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt

#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

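/*
 * Layout note (summarizing the users below): a regular probe records one
 * address in vaddr[] (the probed instruction pointer), while a return
 * probe records two (the probed function's address and the return site),
 * hence the "is_return ? 2 : 1" in SIZEOF_TRACE_ENTRY().  Fetched
 * arguments are stored immediately after the entry header, which is what
 * DATAOF_TRACE_ENTRY() points at.
 */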
#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))

struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

static int trace_uprobe_create(int argc, const char **argv);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
			struct dyn_event *ev);

static struct dyn_event_operations trace_uprobe_ops = {
	.create = trace_uprobe_create,
	.show = trace_uprobe_show,
	.is_busy = trace_uprobe_is_busy,
	.free = trace_uprobe_release,
	.match = trace_uprobe_match,
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct dyn_event		devent;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			ref_ctr_offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

static bool is_trace_uprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_uprobe_ops;
}

static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_uprobe, devent);
}

/**
 * for_each_trace_uprobe - iterate over the trace_uprobe list
 * @pos:	the struct trace_uprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_uprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}

/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
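/*
 * (Reminder only: the max-length/data-location word follows the data_loc
 * convention used by the probe fetch helpers -- see make_data_loc() and
 * get_loc_len()/get_loc_data() -- which pack the length in the upper
 * 16 bits and the offset from @base in the lower 16 bits.  Those helpers
 * are the authoritative definition.)
 */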
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	if (addr == FETCH_TOKEN_COMM)
		ret = strlcpy(dst, current->comm, maxlen);
	else
		ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';
		else
			/*
			 * Include the terminating null byte. In this case it
			 * was copied by strncpy_from_user but not accounted
			 * for in ret.
			 */
			ret++;
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	}

	return ret;
}

/* Return the length of string -- including null terminal byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	if (addr == FETCH_TOKEN_COMM)
		len = strlen(current->comm) + 1;
	else
		len = strnlen_user(vaddr, MAX_STRING_SIZE);

	return (len > MAX_STRING_SIZE) ? 0 : len;
}

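/*
 * Note: "@+OFFSET" style arguments (file-offset fetches) are specified as
 * offsets into the probed binary.  At probe hit time the dispatcher stashes
 * a uprobe_dispatch_data in current->utask->vaddr; subtracting the probe's
 * own file offset from the breakpoint address recovers the mapping base, so
 * base_addr + file_offset below is the argument's runtime virtual address.
 */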
static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}

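/*
 * Fetching is a two-stage pipeline: process_fetch_insn() resolves the first
 * fetch_insn of an argument to a base value (register, user stack slot,
 * return value, immediate, comm token, or translated file offset) and then
 * hands off to process_fetch_insn_bottom() for any dereferences, type
 * conversion and storing into the per-cpu buffer.
 */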
/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
		   void *base)
{
	unsigned long val;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = FETCH_TOKEN_COMM;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

static bool trace_uprobe_is_busy(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return trace_probe_is_enabled(&tu->tp);
}

static bool trace_uprobe_match(const char *system, const char *event,
			struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		(!system || strcmp(tu->tp.call.class->system, system) == 0);
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !group)
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	dyn_event_init(&tu->devent, &trace_uprobe_ops);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	if (!tu)
		return;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	path_put(&tu->path);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct dyn_event *pos;
	struct trace_uprobe *tu;

	for_each_trace_uprobe(tu, pos)
		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	dyn_event_remove(&tu->devent);
	free_trace_uprobe(tu);
	return 0;
}

/*
 * Uprobe with multiple reference counter is not allowed. i.e.
 * If inode and offset matches, reference counter offset *must*
 * match as well. Though, there is one exception: If user is
 * replacing old trace_uprobe with new one(same group/event),
 * then we allow same uprobe with new reference counter as far
 * as the new one does not conflict with any other existing
 * ones.
 */
static struct trace_uprobe *find_old_trace_uprobe(struct trace_uprobe *new)
{
	struct dyn_event *pos;
	struct trace_uprobe *tmp, *old = NULL;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	old = find_probe_event(trace_event_name(&new->tp.call),
				new->tp.call.class->system);

	for_each_trace_uprobe(tmp, pos) {
		if ((old ? old != tmp : true) &&
		    new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return ERR_PTR(-EINVAL);
		}
	}
	return old;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&event_mutex);

	/* register as an event */
	old_tu = find_old_trace_uprobe(tu);
	if (IS_ERR(old_tu)) {
		ret = PTR_ERR(old_tu);
		goto end;
	}

	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	dyn_event_add(&tu->devent);

end:
	mutex_unlock(&event_mutex);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 */
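/*
 * For example (illustrative values only), writing
 *
 *   p:myprobe /bin/bash:0x4245c0 %ip %sp
 *   r:myretprobe /bin/bash:0x4245c0 $retval
 *
 * to <tracefs>/uprobe_events creates a probe and a return probe at file
 * offset 0x4245c0 of /bin/bash under the default "uprobes" group.  A
 * reference counter (semaphore) offset may be appended to the offset as
 * PATH:OFFSET(REF_CTR_OFFSET); both forms are parsed below.
 */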
static int trace_uprobe_create(int argc, const char **argv)
{
	struct trace_uprobe *tu;
	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
	char *arg, *filename, *rctr, *rctr_end, *tmp;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_return = false;
	int i, ret;

	ret = 0;
	ref_ctr_offset = 0;

	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}

	if (argc < 2)
		return -ECANCELED;

	if (argv[0][1] == ':')
		event = &argv[0][2];

	if (!strchr(argv[1], '/'))
		return -ECANCELED;

	filename = kstrdup(argv[1], GFP_KERNEL);
	if (!filename)
		return -ENOMEM;

	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(filename, ':');
	if (!arg || !isdigit(arg[1])) {
		kfree(filename);
		return -ECANCELED;
	}

	trace_probe_log_init("trace_uprobe", argc, argv);
	trace_probe_log_set_index(1);	/* filename is the 2nd argument */

	*arg++ = '\0';
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret) {
		trace_probe_log_err(0, FILE_NOT_FOUND);
		kfree(filename);
		trace_probe_log_clear();
		return ret;
	}
	if (!d_is_reg(path.dentry)) {
		trace_probe_log_err(0, NO_REGULAR_FILE);
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (!rctr_end) {
			ret = -EINVAL;
			rctr_end = rctr + strlen(rctr);
			trace_probe_log_err(rctr_end - filename,
					    REFCNT_OPEN_BRACE);
			goto fail_address_parse;
		} else if (rctr_end[1] != '\0') {
			ret = -EINVAL;
			trace_probe_log_err(rctr_end + 1 - filename,
					    BAD_REFCNT_SUFFIX);
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			trace_probe_log_err(rctr - filename, BAD_REFCNT);
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret) {
		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
		goto fail_address_parse;
	}

	/* setup a probe */
	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, buf,
						  event - argv[0]);
		if (ret)
			goto fail_address_parse;
	} else {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	argc -= 2;
	argv += 2;

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		ret = PTR_ERR(tu);
		/* This must return -ENOMEM otherwise there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;
	tu->filename = filename;

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		tmp = kstrdup(argv[i], GFP_KERNEL);
		if (!tmp) {
			ret = -ENOMEM;
			goto error;
		}

		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
					is_return ? TPARG_FL_RETURN : 0);
		kfree(tmp);
		if (ret)
			goto error;
	}

	ret = register_trace_uprobe(tu);
	if (!ret)
		goto out;

error:
	free_trace_uprobe(tu);
out:
	trace_probe_log_clear();
	return ret;

fail_address_parse:
	trace_probe_log_clear();
	path_put(&path);
	kfree(filename);

	return ret;
}

static int create_or_delete_trace_uprobe(int argc, char **argv)
{
	int ret;

	if (argv[0][0] == '-')
		return dyn_event_release(argc, argv, &trace_uprobe_ops);

	ret = trace_uprobe_create(argc, (const char **)argv);
	return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_uprobe_release(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return unregister_trace_uprobe(tu);
}

/* Probes listing interfaces */
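/*
 * Each registered probe is shown as one line, roughly (matching the format
 * strings below, values illustrative):
 *
 *   p:uprobes/myprobe /bin/bash:0x00000000004245c0 arg1=%ip
 *
 * with the reference counter offset appended as "(0x...)" after the probe
 * offset when one was given.
 */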
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
			trace_event_name(&tu->tp.call), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_uprobe(ev))
		return 0;

	return trace_uprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_uprobe_ops);
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
					create_or_delete_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_uprobe *tu;

	if (!is_trace_uprobe(ev))
		return 0;

	tu = to_trace_uprobe(ev);
	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			trace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

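/*
 * Per-cpu scratch buffers (one page each) used by the dispatchers to
 * assemble the fetched arguments before they are copied into the ftrace
 * ring buffer and/or the perf buffer.  They are allocated lazily on the
 * first enabled probe and refcounted under event_mutex; the per-buffer
 * mutex guards against the task migrating while the buffer is in use.
 */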
struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = &tu->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

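/*
 * Enable a trace_uprobe either for an ftrace event file (TP_FLAG_TRACE) or
 * for perf (TP_FLAG_PROFILE); the two modes are mutually exclusive here.
 * The underlying uprobe (with or without a reference counter offset) is
 * only registered on the first enable.
 */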
static int
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		if (tu->tp.flags & TP_FLAG_PROFILE)
			return -EINTR;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else {
		if (tu->tp.flags & TP_FLAG_TRACE)
			return -EINTR;

		tu->tp.flags |= TP_FLAG_PROFILE;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);
	if (tu->ref_ctr_offset) {
		ret = uprobe_register_refctr(tu->inode, tu->offset,
				tu->ref_ctr_offset, &tu->consumer);
	} else {
		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	}

	if (ret)
		goto err_buffer;

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file) {
		list_del(&link->list);
		kfree(link);
		tu->tp.flags &= ~TP_FLAG_TRACE;
	} else {
		tu->tp.flags &= ~TP_FLAG_PROFILE;
	}
	return ret;
}

static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_rcu();
		kfree(link);

		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->inode = NULL;
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
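/*
 * perf attaches per-task or system-wide events to a probe.  The filter below
 * tracks which mm's currently have an event attached (plus a count of
 * system-wide users), so that breakpoints are only applied to -- and the
 * handlers only fire for -- processes that are actually being traced.
 */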
Oleg Nesterov31ba3342013-02-04 17:11:58 +01001033static bool
1034__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1035{
1036 struct perf_event *event;
1037
1038 if (filter->nr_systemwide)
1039 return true;
1040
1041 list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
Peter Zijlstra50f16a82015-03-05 22:10:19 +01001042 if (event->hw.target->mm == mm)
Oleg Nesterov31ba3342013-02-04 17:11:58 +01001043 return true;
1044 }
1045
1046 return false;
1047}
1048
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001049static inline bool
1050uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
1051{
Peter Zijlstra50f16a82015-03-05 22:10:19 +01001052 return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001053}
1054
Oleg Nesterovce5f36a2014-04-24 13:26:01 +02001055static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
1056{
1057 bool done;
1058
1059 write_lock(&tu->filter.rwlock);
Peter Zijlstra50f16a82015-03-05 22:10:19 +01001060 if (event->hw.target) {
Oleg Nesterovce5f36a2014-04-24 13:26:01 +02001061 list_del(&event->hw.tp_list);
1062 done = tu->filter.nr_systemwide ||
Peter Zijlstra50f16a82015-03-05 22:10:19 +01001063 (event->hw.target->flags & PF_EXITING) ||
Oleg Nesterovce5f36a2014-04-24 13:26:01 +02001064 uprobe_filter_event(tu, event);
1065 } else {
1066 tu->filter.nr_systemwide--;
1067 done = tu->filter.nr_systemwide;
1068 }
1069 write_unlock(&tu->filter.rwlock);
1070
1071 if (!done)
Oleg Nesterov927d6872014-04-24 13:33:31 +02001072 return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
Oleg Nesterovce5f36a2014-04-24 13:26:01 +02001073
1074 return 0;
1075}
1076
Oleg Nesterov736288b2013-02-03 20:58:35 +01001077static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
1078{
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001079 bool done;
Oleg Nesterov927d6872014-04-24 13:33:31 +02001080 int err;
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001081
Oleg Nesterov736288b2013-02-03 20:58:35 +01001082 write_lock(&tu->filter.rwlock);
Peter Zijlstra50f16a82015-03-05 22:10:19 +01001083 if (event->hw.target) {
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001084 /*
1085 * event->parent != NULL means copy_process(), we can avoid
1086 * uprobe_apply(). current->mm must be probed and we can rely
1087 * on dup_mmap() which preserves the already installed bp's.
1088 *
1089 * attr.enable_on_exec means that exec/mmap will install the
1090 * breakpoints we need.
1091 */
1092 done = tu->filter.nr_systemwide ||
1093 event->parent || event->attr.enable_on_exec ||
1094 uprobe_filter_event(tu, event);
Oleg Nesterov736288b2013-02-03 20:58:35 +01001095 list_add(&event->hw.tp_list, &tu->filter.perf_events);
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001096 } else {
1097 done = tu->filter.nr_systemwide;
Oleg Nesterov736288b2013-02-03 20:58:35 +01001098 tu->filter.nr_systemwide++;
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001099 }
Oleg Nesterov736288b2013-02-03 20:58:35 +01001100 write_unlock(&tu->filter.rwlock);
1101
Oleg Nesterov927d6872014-04-24 13:33:31 +02001102 err = 0;
1103 if (!done) {
1104 err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1105 if (err)
1106 uprobe_perf_close(tu, event);
1107 }
1108 return err;
Oleg Nesterov736288b2013-02-03 20:58:35 +01001109}
1110
Oleg Nesterov31ba3342013-02-04 17:11:58 +01001111static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1112 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1113{
1114 struct trace_uprobe *tu;
1115 int ret;
1116
1117 tu = container_of(uc, struct trace_uprobe, consumer);
1118 read_lock(&tu->filter.rwlock);
1119 ret = __uprobe_perf_filter(&tu->filter, mm);
1120 read_unlock(&tu->filter.rwlock);
1121
1122 return ret;
1123}
1124
Namhyung Kima43b9702014-01-17 17:08:36 +09001125static void __uprobe_perf_func(struct trace_uprobe *tu,
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001126 unsigned long func, struct pt_regs *regs,
1127 struct uprobe_cpu_buffer *ucb, int dsize)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301128{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001129 struct trace_event_call *call = &tu->tp.call;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301130 struct uprobe_trace_entry_head *entry;
1131 struct hlist_head *head;
Oleg Nesterov457d1772013-03-29 18:26:51 +01001132 void *data;
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001133 int size, esize;
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001134 int rctx;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301135
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001136 if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
Wang Nan04a22fa2015-07-01 02:13:50 +00001137 return;
1138
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001139 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1140
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001141 size = esize + tu->tp.size + dsize;
1142 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1143 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1144 return;
1145
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301146 preempt_disable();
Oleg Nesterov515619f2013-04-13 15:36:49 +02001147 head = this_cpu_ptr(call->perf_events);
1148 if (hlist_empty(head))
1149 goto out;
1150
Alexei Starovoitov1e1dcd92016-04-06 18:43:24 -07001151 entry = perf_trace_buf_alloc(size, NULL, &rctx);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301152 if (!entry)
1153 goto out;
1154
Oleg Nesterov393a7362013-03-30 18:46:22 +01001155 if (is_ret_probe(tu)) {
1156 entry->vaddr[0] = func;
Oleg Nesterov32520b22013-04-10 16:25:49 +02001157 entry->vaddr[1] = instruction_pointer(regs);
Oleg Nesterov393a7362013-03-30 18:46:22 +01001158 data = DATAOF_TRACE_ENTRY(entry, true);
1159 } else {
Oleg Nesterov32520b22013-04-10 16:25:49 +02001160 entry->vaddr[0] = instruction_pointer(regs);
Oleg Nesterov393a7362013-03-30 18:46:22 +01001161 data = DATAOF_TRACE_ENTRY(entry, false);
1162 }
1163
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001164 memcpy(data, ucb->buf, tu->tp.size + dsize);
Namhyung Kim14577c32013-07-03 15:42:53 +09001165
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001166 if (size - esize > tu->tp.size + dsize) {
1167 int len = tu->tp.size + dsize;
1168
1169 memset(data + len, 0, size - esize - len);
Namhyung Kim14577c32013-07-03 15:42:53 +09001170 }
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301171
Alexei Starovoitov1e1dcd92016-04-06 18:43:24 -07001172 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
Peter Zijlstra8fd0fbb2017-10-11 09:45:29 +02001173 head, NULL);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301174 out:
1175 preempt_enable();
Oleg Nesterova51cc602013-03-30 18:02:12 +01001176}
1177
1178/* uprobe profile handler */
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001179static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1180 struct uprobe_cpu_buffer *ucb, int dsize)
Oleg Nesterova51cc602013-03-30 18:02:12 +01001181{
1182 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1183 return UPROBE_HANDLER_REMOVE;
1184
Oleg Nesterov393a7362013-03-30 18:46:22 +01001185 if (!is_ret_probe(tu))
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001186 __uprobe_perf_func(tu, 0, regs, ucb, dsize);
Oleg Nesterovf42d24a2013-02-04 17:48:34 +01001187 return 0;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301188}
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001189
1190static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001191 struct pt_regs *regs,
1192 struct uprobe_cpu_buffer *ucb, int dsize)
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001193{
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001194 __uprobe_perf_func(tu, func, regs, ucb, dsize);
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001195}
Yonghong Song41bdc4b2018-05-24 11:21:09 -07001196
1197int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1198 const char **filename, u64 *probe_offset,
1199 bool perf_type_tracepoint)
1200{
1201 const char *pevent = trace_event_name(event->tp_event);
1202 const char *group = event->tp_event->class->system;
1203 struct trace_uprobe *tu;
1204
1205 if (perf_type_tracepoint)
1206 tu = find_probe_event(pevent, group);
1207 else
1208 tu = event->tp_event->data;
1209 if (!tu)
1210 return -EINVAL;
1211
1212 *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1213 : BPF_FD_TYPE_UPROBE;
1214 *filename = tu->filename;
1215 *probe_offset = tu->offset;
1216 return 0;
1217}
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301218#endif /* CONFIG_PERF_EVENTS */
1219
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001220static int
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001221trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001222 void *data)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301223{
Oleg Nesterov457d1772013-03-29 18:26:51 +01001224 struct trace_uprobe *tu = event->data;
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04001225 struct trace_event_file *file = data;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301226
1227 switch (type) {
1228 case TRACE_REG_REGISTER:
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001229 return probe_event_enable(tu, file, NULL);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301230
1231 case TRACE_REG_UNREGISTER:
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001232 probe_event_disable(tu, file);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301233 return 0;
1234
1235#ifdef CONFIG_PERF_EVENTS
1236 case TRACE_REG_PERF_REGISTER:
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001237 return probe_event_enable(tu, NULL, uprobe_perf_filter);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301238
1239 case TRACE_REG_PERF_UNREGISTER:
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001240 probe_event_disable(tu, NULL);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301241 return 0;
Oleg Nesterov736288b2013-02-03 20:58:35 +01001242
1243 case TRACE_REG_PERF_OPEN:
1244 return uprobe_perf_open(tu, data);
1245
1246 case TRACE_REG_PERF_CLOSE:
1247 return uprobe_perf_close(tu, data);
1248
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301249#endif
1250 default:
1251 return 0;
1252 }
1253 return 0;
1254}
1255
1256static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1257{
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301258 struct trace_uprobe *tu;
Namhyung Kimb7e0bf32013-11-25 13:42:47 +09001259 struct uprobe_dispatch_data udd;
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001260 struct uprobe_cpu_buffer *ucb;
1261 int dsize, esize;
Oleg Nesterovf42d24a2013-02-04 17:48:34 +01001262 int ret = 0;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301263
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001264
Oleg Nesterova932b732013-01-31 19:47:23 +01001265 tu = container_of(con, struct trace_uprobe, consumer);
Oleg Nesterov1b47aef2013-01-31 19:55:27 +01001266 tu->nhit++;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301267
Namhyung Kimb7e0bf32013-11-25 13:42:47 +09001268 udd.tu = tu;
1269 udd.bp_addr = instruction_pointer(regs);
1270
1271 current->utask->vaddr = (unsigned long) &udd;
1272
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001273 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1274 return 0;
1275
1276 dsize = __get_data_size(&tu->tp, regs);
1277 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1278
1279 ucb = uprobe_buffer_get();
Masami Hiramatsu91784122018-04-25 21:19:01 +09001280 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001281
Namhyung Kim14577c32013-07-03 15:42:53 +09001282 if (tu->tp.flags & TP_FLAG_TRACE)
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001283 ret |= uprobe_trace_func(tu, regs, ucb, dsize);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301284
1285#ifdef CONFIG_PERF_EVENTS
Namhyung Kim14577c32013-07-03 15:42:53 +09001286 if (tu->tp.flags & TP_FLAG_PROFILE)
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001287 ret |= uprobe_perf_func(tu, regs, ucb, dsize);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301288#endif
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001289 uprobe_buffer_put(ucb);
Oleg Nesterovf42d24a2013-02-04 17:48:34 +01001290 return ret;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301291}
1292
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}

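/* .trace is used to pretty-print a recorded uprobe event when it is read back */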
static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

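/*
 * Fill in the trace_event_call fields that are common to both the
 * uprobe_events text interface and the perf-only "local" events created
 * under CONFIG_PERF_EVENTS below.
 */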
static inline void init_trace_event_call(struct trace_uprobe *tu,
					 struct trace_event_call *call)
{
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	call->flags = TRACE_EVENT_FL_UPROBE;
	call->class->reg = trace_uprobe_register;
	call->data = tu;
}

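/*
 * Register the event with the trace event subsystem so it appears under
 * events/uprobes/ in tracefs; the print format is built first so the
 * event's "format" file and output formatting work.
 */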
static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct trace_event_call *call = &tu->tp.call;
	int ret = 0;

	init_trace_event_call(tu, call);

	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}

	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}

	return ret;
}

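/* Tear down what register_uprobe_event() set up; fails if the event is busy */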
static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	int ret;

	/* tu->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tu->tp.call);
	if (ret)
		return ret;
	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;
	return 0;
}

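/*
 * "Local" uprobe events are created directly by perf (e.g. a perf_uprobe
 * PMU event opened via perf_event_open()) from a binary path and offset,
 * bypassing the uprobe_events text interface; they are never made visible
 * in tracefs.
 */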
#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
			  unsigned long ref_ctr_offset, bool is_return)
{
	struct trace_uprobe *tu;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	if (!d_is_reg(path.dentry)) {
		path_put(&path);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * local trace_uprobes are not added to dyn_event, so they are never
	 * searched in find_probe_event(). Therefore, there is no concern of
	 * duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);

	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		path_put(&path);
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->path = path;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->filename = kstrdup(name, GFP_KERNEL);
	init_trace_event_call(tu, &tu->tp.call);

	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return &tu->tp.call;
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}

void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = container_of(event_call, struct trace_uprobe, tp.call);

	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;

	free_trace_uprobe(tu);
}
#endif /* CONFIG_PERF_EVENTS */

/* Make a trace interface for controlling probe points */
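/*
 * Probes are defined from user space by writing to the uprobe_events file
 * created here, for example (path and offset are illustrative only):
 *
 *   echo 'p:myprobe /bin/bash:0x4245c0' > uprobe_events
 *
 * while uprobe_profile exposes per-probe hit counts.
 */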
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;
	int ret;

	ret = dyn_event_register(&trace_uprobe_ops);
	if (ret)
		return ret;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
			  NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
			  NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);