// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt) "trace_uprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * ((is_return) ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void *)(entry) + SIZEOF_TRACE_ENTRY(is_return))

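/*
 * Record layout sketch (illustrative, implied by the macros above and
 * the trace handlers below): a return probe stores two vaddr slots, a
 * plain probe only one, and the fetched argument data follows:
 *
 *	| trace_entry ent | vaddr[0] (func) | vaddr[1] (ret ip) | args |
 *
 * DATAOF_TRACE_ENTRY() yields the address right after the vaddr slots.
 */
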
struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head		list;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			ref_ctr_offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}

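/*
 * Example (a sketch, not tied to a real binary): a FETCHARG of
 * "$stack2" is compiled to FETCH_OP_STACK with param == 2, so
 * process_fetch_insn() below ends up calling get_user_stack_nth(regs, 2)
 * and reads the word at sp + 2 * sizeof(long) (sp - 2 * sizeof(long)
 * with CONFIG_STACK_GROWSUP). A faulting access quietly yields 0.
 */
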
/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_user_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size);
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	}

	return ret;
}

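/*
 * Illustration of the data_loc encoding used above: make_data_loc()
 * packs the copied length into the upper 16 bits of the u32 and the
 * offset of the data relative to @base into the lower 16 bits, so a
 * 5-byte string placed 40 bytes into the record is stored as
 * (5 << 16) | 40; get_loc_len()/get_loc_data() undo this packing.
 */
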
/* Return the length of the string, including the terminating NUL byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	len = strnlen_user(vaddr, MAX_STRING_SIZE);

	return (len > MAX_STRING_SIZE) ? 0 : len;
}

static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}

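/*
 * Worked example (addresses invented): if a probe placed at file
 * offset 0x4a0 fires at runtime address 0x7f33000004a0, the mapping
 * base is 0x7f33000004a0 - 0x4a0 = 0x7f3300000000, so a FETCH_OP_FOFFS
 * argument naming file offset 0x10 resolves to 0x7f3300000010.
 */
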
/* Note that we don't verify the fetch_insn code, since it does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
		   void *base)
{
	unsigned long val;
	int ret = 0;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	/* 2nd stage: dereference memory if needed */
	while (code->op == FETCH_OP_DEREF) {
		ret = probe_user_read(&val, (void *)val + code->offset,
				      sizeof(val));
		if (ret)
			return ret;
		code++;
	}

	/* 3rd stage: store value to buffer */
	if (unlikely(!dest)) {
		if (code->op == FETCH_OP_ST_STRING)
			return fetch_store_strlen(val + code->offset);
		else
			return -EILSEQ;
	}

	switch (code->op) {
	case FETCH_OP_ST_RAW:
		fetch_store_raw(val, code, dest);
		break;
	case FETCH_OP_ST_MEM:
		/* val is a user-space address here, so use the user reader */
		probe_user_read(dest, (void *)val + code->offset, code->size);
		break;
	case FETCH_OP_ST_STRING:
		ret = fetch_store_string(val + code->offset, dest, base);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	/* 4th stage: modify stored value if needed */
	if (code->op == FETCH_OP_MOD_BF) {
		fetch_apply_bitfield(code, dest);
		code++;
	}

	return code->op == FETCH_OP_END ? ret : -EILSEQ;
}
NOKPROBE_SYMBOL(process_fetch_insn)

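/*
 * Rough shape of the fetch programs interpreted above (illustrative
 * only; the exact op sequence is whatever traceprobe_parse_probe_arg()
 * emits; register names are x86 examples):
 *
 *	"%ax:u32"        ~ { FETCH_OP_REG, FETCH_OP_ST_RAW, FETCH_OP_END }
 *	"+8(%di):u64"    ~ { FETCH_OP_REG, FETCH_OP_ST_MEM, FETCH_OP_END }
 *	"+0(%si):string" ~ { FETCH_OP_REG, FETCH_OP_ST_STRING, FETCH_OP_END }
 */
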
static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	path_put(&tu->path);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	list_del(&tu->list);
	free_trace_uprobe(tu);
	return 0;
}

/*
 * A uprobe with multiple reference counters is not allowed; i.e. if
 * the inode and offset match, the reference counter offset *must*
 * match as well. There is one exception: if the user is replacing an
 * old trace_uprobe with a new one (same group/event), the same uprobe
 * may take a new reference counter, as long as the new one does not
 * conflict with any other existing ones.
 */
static struct trace_uprobe *find_old_trace_uprobe(struct trace_uprobe *new)
{
	struct trace_uprobe *tmp, *old = NULL;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	old = find_probe_event(trace_event_name(&new->tp.call),
				new->tp.call.class->system);

	list_for_each_entry(tmp, &uprobe_list, list) {
		if ((old ? old != tmp : true) &&
		    new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.\n");
			return ERR_PTR(-EINVAL);
		}
	}
	return old;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_old_trace_uprobe(tu);
	if (IS_ERR(old_tu)) {
		ret = PTR_ERR(old_tu);
		goto end;
	}

	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET[(REF_CTR_OFFSET)] [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
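/*
 * Usage sketch via tracefs (paths and offsets below are illustrative,
 * not taken from a real binary):
 *
 *	echo 'p:mygrp/myevt /bin/bash:0x4245c0 arg1=%ax' >> uprobe_events
 *	echo 'r:myret /bin/bash:0x4245c0 $retval' >> uprobe_events
 *	echo 'p:mysdt /usr/lib/libfoo.so:0x3e361(0x1e05a8)' >> uprobe_events
 *	echo '-:mygrp/myevt' >> uprobe_events
 */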
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	char *arg, *event, *group, *filename, *rctr, *rctr_end;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_delete, is_return;
	int i, ret;

	ret = 0;
	is_delete = false;
	is_return = false;
	event = NULL;
	group = NULL;
	ref_ctr_offset = 0;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must start with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');

		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		int ret;

		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);

		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(argv[1], ':');
	if (!arg)
		return -EINVAL;

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		return ret;

	if (!d_is_reg(path.dentry)) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (!rctr_end || rctr > rctr_end || *(rctr_end + 1) != 0) {
			ret = -EINVAL;
			pr_info("Invalid reference counter offset.\n");
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			pr_info("Invalid reference counter offset.\n");
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;
	tu->filename = kstrdup(filename, GFP_KERNEL);

	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		/* Increment count for freeing args in error case */
		tu->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
						 is_return, false);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	path_put(&path);

	pr_info("Failed to parse address or file.\n");

	return ret;
}

static int cleanup_all_probes(void)
{
	struct trace_uprobe *tu;
	int ret = 0;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		ret = unregister_trace_uprobe(tu);
		if (ret)
			break;
	}
	mutex_unlock(&uprobe_lock);
	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
			trace_event_name(&tu->tp.call), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}

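/*
 * A line produced by the ->show() above looks like this (all values
 * invented for illustration):
 *
 *	p:uprobes/myevt /bin/bash:0x00000000004245c0(0x1e05a8) arg1=%ax
 */
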
static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = cleanup_all_probes();
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, " %s %-44s %15lu\n", tu->filename,
			trace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}

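/*
 * Each uprobe_profile row pairs the probed file and event name with a
 * hit count, e.g. (numbers invented):
 *
 *	/bin/bash myevt                                            1287
 */
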
721static const struct seq_operations profile_seq_op = {
722 .start = probes_seq_start,
723 .next = probes_seq_next,
724 .stop = probes_seq_stop,
725 .show = probes_profile_seq_show
726};
727
728static int profile_open(struct inode *inode, struct file *file)
729{
730 return seq_open(file, &profile_seq_op);
731}
732
733static const struct file_operations uprobe_profile_ops = {
734 .owner = THIS_MODULE,
735 .open = profile_open,
736 .read = seq_read,
737 .llseek = seq_lseek,
738 .release = seq_release,
739};
740
Namhyung Kimdcad1a22013-07-03 16:40:28 +0900741struct uprobe_cpu_buffer {
742 struct mutex mutex;
743 void *buf;
744};
745static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
746static int uprobe_buffer_refcnt;
747
748static int uprobe_buffer_init(void)
749{
750 int cpu, err_cpu;
751
752 uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
753 if (uprobe_cpu_buffer == NULL)
754 return -ENOMEM;
755
756 for_each_possible_cpu(cpu) {
757 struct page *p = alloc_pages_node(cpu_to_node(cpu),
758 GFP_KERNEL, 0);
759 if (p == NULL) {
760 err_cpu = cpu;
761 goto err;
762 }
763 per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
764 mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
765 }
766
767 return 0;
768
769err:
770 for_each_possible_cpu(cpu) {
771 if (cpu == err_cpu)
772 break;
773 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
774 }
775
776 free_percpu(uprobe_cpu_buffer);
777 return -ENOMEM;
778}
779
780static int uprobe_buffer_enable(void)
781{
782 int ret = 0;
783
784 BUG_ON(!mutex_is_locked(&event_mutex));
785
786 if (uprobe_buffer_refcnt++ == 0) {
787 ret = uprobe_buffer_init();
788 if (ret < 0)
789 uprobe_buffer_refcnt--;
790 }
791
792 return ret;
793}
794
795static void uprobe_buffer_disable(void)
796{
zhangwei(Jovi)6ea62152014-04-17 16:05:19 +0800797 int cpu;
798
Namhyung Kimdcad1a22013-07-03 16:40:28 +0900799 BUG_ON(!mutex_is_locked(&event_mutex));
800
801 if (--uprobe_buffer_refcnt == 0) {
zhangwei(Jovi)6ea62152014-04-17 16:05:19 +0800802 for_each_possible_cpu(cpu)
803 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
804 cpu)->buf);
805
Namhyung Kimdcad1a22013-07-03 16:40:28 +0900806 free_percpu(uprobe_cpu_buffer);
807 uprobe_cpu_buffer = NULL;
808 }
809}
810
811static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
812{
813 struct uprobe_cpu_buffer *ucb;
814 int cpu;
815
816 cpu = raw_smp_processor_id();
817 ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
818
819 /*
820 * Use per-cpu buffers for fastest access, but we might migrate
821 * so the mutex makes sure we have sole access to it.
822 */
823 mutex_lock(&ucb->mutex);
824
825 return ucb;
826}
827
828static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
829{
830 mutex_unlock(&ucb->mutex);
831}
832
Namhyung Kima43b9702014-01-17 17:08:36 +0900833static void __uprobe_trace_func(struct trace_uprobe *tu,
Namhyung Kimdd9fa552014-01-17 17:08:37 +0900834 unsigned long func, struct pt_regs *regs,
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900835 struct uprobe_cpu_buffer *ucb, int dsize,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400836 struct trace_event_file *trace_file)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530837{
838 struct uprobe_trace_entry_head *entry;
839 struct ring_buffer_event *event;
840 struct ring_buffer *buffer;
Oleg Nesterov457d1772013-03-29 18:26:51 +0100841 void *data;
Namhyung Kimdd9fa552014-01-17 17:08:37 +0900842 int size, esize;
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -0400843 struct trace_event_call *call = &tu->tp.call;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530844
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400845 WARN_ON(call != trace_file->event_call);
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900846
Namhyung Kimdd9fa552014-01-17 17:08:37 +0900847 if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
Oleg Nesterova51cc602013-03-30 18:02:12 +0100848 return;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530849
Steven Rostedt (Red Hat)09a50592015-05-13 15:21:25 -0400850 if (trace_trigger_soft_disabled(trace_file))
Namhyung Kimca3b1622014-01-17 17:08:39 +0900851 return;
852
Namhyung Kimdd9fa552014-01-17 17:08:37 +0900853 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
Namhyung Kimdcad1a22013-07-03 16:40:28 +0900854 size = esize + tu->tp.size + dsize;
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400855 event = trace_event_buffer_lock_reserve(&buffer, trace_file,
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900856 call->event.type, size, 0, 0);
Namhyung Kimdcad1a22013-07-03 16:40:28 +0900857 if (!event)
Namhyung Kimdd9fa552014-01-17 17:08:37 +0900858 return;
Namhyung Kimdcad1a22013-07-03 16:40:28 +0900859
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530860 entry = ring_buffer_event_data(event);
Oleg Nesterov393a7362013-03-30 18:46:22 +0100861 if (is_ret_probe(tu)) {
862 entry->vaddr[0] = func;
863 entry->vaddr[1] = instruction_pointer(regs);
864 data = DATAOF_TRACE_ENTRY(entry, true);
865 } else {
866 entry->vaddr[0] = instruction_pointer(regs);
867 data = DATAOF_TRACE_ENTRY(entry, false);
868 }
869
Namhyung Kimdcad1a22013-07-03 16:40:28 +0900870 memcpy(data, ucb->buf, tu->tp.size + dsize);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530871
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400872 event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
Oleg Nesterova51cc602013-03-30 18:02:12 +0100873}
Oleg Nesterovf42d24a2013-02-04 17:48:34 +0100874
Oleg Nesterova51cc602013-03-30 18:02:12 +0100875/* uprobe handler */
Namhyung Kimdd9fa552014-01-17 17:08:37 +0900876static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
877 struct uprobe_cpu_buffer *ucb, int dsize)
Oleg Nesterova51cc602013-03-30 18:02:12 +0100878{
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900879 struct event_file_link *link;
880
881 if (is_ret_probe(tu))
882 return 0;
883
884 rcu_read_lock();
885 list_for_each_entry_rcu(link, &tu->tp.files, list)
886 __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
887 rcu_read_unlock();
888
Oleg Nesterovf42d24a2013-02-04 17:48:34 +0100889 return 0;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530890}
891
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +0100892static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
Namhyung Kimdd9fa552014-01-17 17:08:37 +0900893 struct pt_regs *regs,
894 struct uprobe_cpu_buffer *ucb, int dsize)
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +0100895{
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900896 struct event_file_link *link;
897
898 rcu_read_lock();
899 list_for_each_entry_rcu(link, &tu->tp.files, list)
900 __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
901 rcu_read_unlock();
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +0100902}
903
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530904/* Event entry printers */
905static enum print_line_t
906print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
907{
Oleg Nesterov457d1772013-03-29 18:26:51 +0100908 struct uprobe_trace_entry_head *entry;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530909 struct trace_seq *s = &iter->seq;
910 struct trace_uprobe *tu;
911 u8 *data;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530912
Oleg Nesterov457d1772013-03-29 18:26:51 +0100913 entry = (struct uprobe_trace_entry_head *)iter->ent;
Namhyung Kim14577c32013-07-03 15:42:53 +0900914 tu = container_of(event, struct trace_uprobe, tp.call.event);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530915
Oleg Nesterov3ede82d2013-03-30 19:48:09 +0100916 if (is_ret_probe(tu)) {
Steven Rostedt (Red Hat)8579a102014-11-12 17:26:57 -0500917 trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
Steven Rostedt (Red Hat)687fcc42015-05-13 14:20:14 -0400918 trace_event_name(&tu->tp.call),
Steven Rostedt (Red Hat)8579a102014-11-12 17:26:57 -0500919 entry->vaddr[1], entry->vaddr[0]);
Oleg Nesterov3ede82d2013-03-30 19:48:09 +0100920 data = DATAOF_TRACE_ENTRY(entry, true);
921 } else {
Steven Rostedt (Red Hat)8579a102014-11-12 17:26:57 -0500922 trace_seq_printf(s, "%s: (0x%lx)",
Steven Rostedt (Red Hat)687fcc42015-05-13 14:20:14 -0400923 trace_event_name(&tu->tp.call),
Steven Rostedt (Red Hat)8579a102014-11-12 17:26:57 -0500924 entry->vaddr[0]);
Oleg Nesterov3ede82d2013-03-30 19:48:09 +0100925 data = DATAOF_TRACE_ENTRY(entry, false);
926 }
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530927
Masami Hiramatsu56de7632018-04-25 21:16:36 +0900928 if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
929 goto out;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530930
Steven Rostedt (Red Hat)8579a102014-11-12 17:26:57 -0500931 trace_seq_putc(s, '\n');
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530932
Steven Rostedt (Red Hat)8579a102014-11-12 17:26:57 -0500933 out:
934 return trace_handle_return(s);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530935}
936
Oleg Nesterov31ba3342013-02-04 17:11:58 +0100937typedef bool (*filter_func_t)(struct uprobe_consumer *self,
938 enum uprobe_filter_ctx ctx,
939 struct mm_struct *mm);
940
941static int
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400942probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900943 filter_func_t filter)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530944{
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900945 bool enabled = trace_probe_is_enabled(&tu->tp);
946 struct event_file_link *link = NULL;
947 int ret;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530948
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900949 if (file) {
Oleg Nesterov48212542014-06-27 19:01:36 +0200950 if (tu->tp.flags & TP_FLAG_PROFILE)
951 return -EINTR;
952
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900953 link = kmalloc(sizeof(*link), GFP_KERNEL);
954 if (!link)
955 return -ENOMEM;
956
957 link->file = file;
958 list_add_tail_rcu(&link->list, &tu->tp.files);
959
960 tu->tp.flags |= TP_FLAG_TRACE;
Oleg Nesterov48212542014-06-27 19:01:36 +0200961 } else {
962 if (tu->tp.flags & TP_FLAG_TRACE)
963 return -EINTR;
964
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900965 tu->tp.flags |= TP_FLAG_PROFILE;
Oleg Nesterov48212542014-06-27 19:01:36 +0200966 }
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530967
Oleg Nesterov736288b2013-02-03 20:58:35 +0100968 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
969
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +0900970 if (enabled)
971 return 0;
972
Oleg Nesterovfb6bab62014-06-27 19:01:46 +0200973 ret = uprobe_buffer_enable();
974 if (ret)
975 goto err_flags;
976
Oleg Nesterov31ba3342013-02-04 17:11:58 +0100977 tu->consumer.filter = filter;
Song Liu0c92c7a2018-04-23 10:21:34 -0700978 tu->inode = d_real_inode(tu->path.dentry);
Ravi Bangoria1cc33162018-08-20 10:12:47 +0530979 if (tu->ref_ctr_offset) {
980 ret = uprobe_register_refctr(tu->inode, tu->offset,
981 tu->ref_ctr_offset, &tu->consumer);
982 } else {
983 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
984 }
985
Oleg Nesterovfb6bab62014-06-27 19:01:46 +0200986 if (ret)
987 goto err_buffer;
Oleg Nesterov41618242013-01-27 18:36:24 +0100988
Oleg Nesterovfb6bab62014-06-27 19:01:46 +0200989 return 0;
990
991 err_buffer:
992 uprobe_buffer_disable();
993
994 err_flags:
995 if (file) {
996 list_del(&link->list);
997 kfree(link);
998 tu->tp.flags &= ~TP_FLAG_TRACE;
999 } else {
1000 tu->tp.flags &= ~TP_FLAG_PROFILE;
1001 }
Oleg Nesterov41618242013-01-27 18:36:24 +01001002 return ret;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301003}
1004
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001005static void
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04001006probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301007{
Namhyung Kim14577c32013-07-03 15:42:53 +09001008 if (!trace_probe_is_enabled(&tu->tp))
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301009 return;
1010
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001011 if (file) {
1012 struct event_file_link *link;
1013
1014 link = find_event_file_link(&tu->tp, file);
1015 if (!link)
1016 return;
1017
1018 list_del_rcu(&link->list);
1019 /* synchronize with u{,ret}probe_trace_func */
Steven Rostedt (VMware)016f8ff2018-08-09 15:37:59 -04001020 synchronize_rcu();
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001021 kfree(link);
1022
1023 if (!list_empty(&tu->tp.files))
1024 return;
1025 }
1026
Oleg Nesterov736288b2013-02-03 20:58:35 +01001027 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
1028
Oleg Nesterova932b732013-01-31 19:47:23 +01001029 uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
Song Liu0c92c7a2018-04-23 10:21:34 -07001030 tu->inode = NULL;
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001031 tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001032
1033 uprobe_buffer_disable();
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301034}
1035
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001036static int uprobe_event_define_fields(struct trace_event_call *event_call)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301037{
Masami Hiramatsueeb07b02018-04-25 21:17:05 +09001038 int ret, size;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301039 struct uprobe_trace_entry_head field;
Oleg Nesterov457d1772013-03-29 18:26:51 +01001040 struct trace_uprobe *tu = event_call->data;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301041
Oleg Nesterov4d1298e2013-03-30 19:23:15 +01001042 if (is_ret_probe(tu)) {
1043 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
1044 DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
1045 size = SIZEOF_TRACE_ENTRY(true);
1046 } else {
1047 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
1048 size = SIZEOF_TRACE_ENTRY(false);
1049 }
Namhyung Kim14577c32013-07-03 15:42:53 +09001050
Masami Hiramatsueeb07b02018-04-25 21:17:05 +09001051 return traceprobe_define_arg_fields(event_call, size, &tu->tp);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301052}
1053
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301054#ifdef CONFIG_PERF_EVENTS
Oleg Nesterov31ba3342013-02-04 17:11:58 +01001055static bool
1056__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1057{
1058 struct perf_event *event;
1059
1060 if (filter->nr_systemwide)
1061 return true;
1062
1063 list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
Peter Zijlstra50f16a82015-03-05 22:10:19 +01001064 if (event->hw.target->mm == mm)
Oleg Nesterov31ba3342013-02-04 17:11:58 +01001065 return true;
1066 }
1067
1068 return false;
1069}
1070
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001071static inline bool
1072uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
1073{
Peter Zijlstra50f16a82015-03-05 22:10:19 +01001074 return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001075}
1076
Oleg Nesterovce5f36a2014-04-24 13:26:01 +02001077static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
1078{
1079 bool done;
1080
1081 write_lock(&tu->filter.rwlock);
Peter Zijlstra50f16a82015-03-05 22:10:19 +01001082 if (event->hw.target) {
Oleg Nesterovce5f36a2014-04-24 13:26:01 +02001083 list_del(&event->hw.tp_list);
1084 done = tu->filter.nr_systemwide ||
Peter Zijlstra50f16a82015-03-05 22:10:19 +01001085 (event->hw.target->flags & PF_EXITING) ||
Oleg Nesterovce5f36a2014-04-24 13:26:01 +02001086 uprobe_filter_event(tu, event);
1087 } else {
1088 tu->filter.nr_systemwide--;
1089 done = tu->filter.nr_systemwide;
1090 }
1091 write_unlock(&tu->filter.rwlock);
1092
1093 if (!done)
Oleg Nesterov927d6872014-04-24 13:33:31 +02001094 return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
Oleg Nesterovce5f36a2014-04-24 13:26:01 +02001095
1096 return 0;
1097}
1098
Oleg Nesterov736288b2013-02-03 20:58:35 +01001099static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
1100{
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001101 bool done;
Oleg Nesterov927d6872014-04-24 13:33:31 +02001102 int err;
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001103
Oleg Nesterov736288b2013-02-03 20:58:35 +01001104 write_lock(&tu->filter.rwlock);
Peter Zijlstra50f16a82015-03-05 22:10:19 +01001105 if (event->hw.target) {
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001106 /*
1107 * event->parent != NULL means copy_process(), we can avoid
1108 * uprobe_apply(). current->mm must be probed and we can rely
1109 * on dup_mmap() which preserves the already installed bp's.
1110 *
1111 * attr.enable_on_exec means that exec/mmap will install the
1112 * breakpoints we need.
1113 */
1114 done = tu->filter.nr_systemwide ||
1115 event->parent || event->attr.enable_on_exec ||
1116 uprobe_filter_event(tu, event);
Oleg Nesterov736288b2013-02-03 20:58:35 +01001117 list_add(&event->hw.tp_list, &tu->filter.perf_events);
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001118 } else {
1119 done = tu->filter.nr_systemwide;
Oleg Nesterov736288b2013-02-03 20:58:35 +01001120 tu->filter.nr_systemwide++;
Oleg Nesterovb2fe8ba2013-02-04 19:05:43 +01001121 }
Oleg Nesterov736288b2013-02-03 20:58:35 +01001122 write_unlock(&tu->filter.rwlock);
1123
Oleg Nesterov927d6872014-04-24 13:33:31 +02001124 err = 0;
1125 if (!done) {
1126 err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1127 if (err)
1128 uprobe_perf_close(tu, event);
1129 }
1130 return err;
Oleg Nesterov736288b2013-02-03 20:58:35 +01001131}
1132
Oleg Nesterov31ba3342013-02-04 17:11:58 +01001133static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1134 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1135{
1136 struct trace_uprobe *tu;
1137 int ret;
1138
1139 tu = container_of(uc, struct trace_uprobe, consumer);
1140 read_lock(&tu->filter.rwlock);
1141 ret = __uprobe_perf_filter(&tu->filter, mm);
1142 read_unlock(&tu->filter.rwlock);
1143
1144 return ret;
1145}
1146
Namhyung Kima43b9702014-01-17 17:08:36 +09001147static void __uprobe_perf_func(struct trace_uprobe *tu,
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001148 unsigned long func, struct pt_regs *regs,
1149 struct uprobe_cpu_buffer *ucb, int dsize)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301150{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001151 struct trace_event_call *call = &tu->tp.call;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301152 struct uprobe_trace_entry_head *entry;
1153 struct hlist_head *head;
Oleg Nesterov457d1772013-03-29 18:26:51 +01001154 void *data;
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001155 int size, esize;
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001156 int rctx;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301157
Yonghong Songe87c6bc382017-10-23 23:53:08 -07001158 if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
Wang Nan04a22fa2015-07-01 02:13:50 +00001159 return;
1160
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001161 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1162
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001163 size = esize + tu->tp.size + dsize;
1164 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1165 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1166 return;
1167
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301168 preempt_disable();
Oleg Nesterov515619f2013-04-13 15:36:49 +02001169 head = this_cpu_ptr(call->perf_events);
1170 if (hlist_empty(head))
1171 goto out;
1172
Alexei Starovoitov1e1dcd92016-04-06 18:43:24 -07001173 entry = perf_trace_buf_alloc(size, NULL, &rctx);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301174 if (!entry)
1175 goto out;
1176
Oleg Nesterov393a7362013-03-30 18:46:22 +01001177 if (is_ret_probe(tu)) {
1178 entry->vaddr[0] = func;
Oleg Nesterov32520b22013-04-10 16:25:49 +02001179 entry->vaddr[1] = instruction_pointer(regs);
Oleg Nesterov393a7362013-03-30 18:46:22 +01001180 data = DATAOF_TRACE_ENTRY(entry, true);
1181 } else {
Oleg Nesterov32520b22013-04-10 16:25:49 +02001182 entry->vaddr[0] = instruction_pointer(regs);
Oleg Nesterov393a7362013-03-30 18:46:22 +01001183 data = DATAOF_TRACE_ENTRY(entry, false);
1184 }
1185
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001186 memcpy(data, ucb->buf, tu->tp.size + dsize);
Namhyung Kim14577c32013-07-03 15:42:53 +09001187
Namhyung Kimdcad1a22013-07-03 16:40:28 +09001188 if (size - esize > tu->tp.size + dsize) {
1189 int len = tu->tp.size + dsize;
1190
1191 memset(data + len, 0, size - esize - len);
Namhyung Kim14577c32013-07-03 15:42:53 +09001192 }
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301193
Alexei Starovoitov1e1dcd92016-04-06 18:43:24 -07001194 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
Peter Zijlstra8fd0fbb2017-10-11 09:45:29 +02001195 head, NULL);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301196 out:
1197 preempt_enable();
Oleg Nesterova51cc602013-03-30 18:02:12 +01001198}
1199
1200/* uprobe profile handler */
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001201static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1202 struct uprobe_cpu_buffer *ucb, int dsize)
Oleg Nesterova51cc602013-03-30 18:02:12 +01001203{
1204 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1205 return UPROBE_HANDLER_REMOVE;
1206
Oleg Nesterov393a7362013-03-30 18:46:22 +01001207 if (!is_ret_probe(tu))
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001208 __uprobe_perf_func(tu, 0, regs, ucb, dsize);
Oleg Nesterovf42d24a2013-02-04 17:48:34 +01001209 return 0;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301210}
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001211
1212static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001213 struct pt_regs *regs,
1214 struct uprobe_cpu_buffer *ucb, int dsize)
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001215{
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001216 __uprobe_perf_func(tu, func, regs, ucb, dsize);
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001217}
Yonghong Song41bdc4b2018-05-24 11:21:09 -07001218
1219int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1220 const char **filename, u64 *probe_offset,
1221 bool perf_type_tracepoint)
1222{
1223 const char *pevent = trace_event_name(event->tp_event);
1224 const char *group = event->tp_event->class->system;
1225 struct trace_uprobe *tu;
1226
1227 if (perf_type_tracepoint)
1228 tu = find_probe_event(pevent, group);
1229 else
1230 tu = event->tp_event->data;
1231 if (!tu)
1232 return -EINVAL;
1233
1234 *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1235 : BPF_FD_TYPE_UPROBE;
1236 *filename = tu->filename;
1237 *probe_offset = tu->offset;
1238 return 0;
1239}
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301240#endif /* CONFIG_PERF_EVENTS */
1241
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001242static int
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001243trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001244 void *data)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301245{
Oleg Nesterov457d1772013-03-29 18:26:51 +01001246 struct trace_uprobe *tu = event->data;
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04001247 struct trace_event_file *file = data;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301248
1249 switch (type) {
1250 case TRACE_REG_REGISTER:
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001251 return probe_event_enable(tu, file, NULL);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301252
1253 case TRACE_REG_UNREGISTER:
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001254 probe_event_disable(tu, file);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301255 return 0;
1256
1257#ifdef CONFIG_PERF_EVENTS
1258 case TRACE_REG_PERF_REGISTER:
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001259 return probe_event_enable(tu, NULL, uprobe_perf_filter);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301260
1261 case TRACE_REG_PERF_UNREGISTER:
zhangwei(Jovi)70ed91c2014-01-17 17:08:38 +09001262 probe_event_disable(tu, NULL);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301263 return 0;
Oleg Nesterov736288b2013-02-03 20:58:35 +01001264
1265 case TRACE_REG_PERF_OPEN:
1266 return uprobe_perf_open(tu, data);
1267
1268 case TRACE_REG_PERF_CLOSE:
1269 return uprobe_perf_close(tu, data);
1270
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301271#endif
1272 default:
1273 return 0;
1274 }
1275 return 0;
1276}
1277
1278static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1279{
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301280 struct trace_uprobe *tu;
Namhyung Kimb7e0bf32013-11-25 13:42:47 +09001281 struct uprobe_dispatch_data udd;
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001282 struct uprobe_cpu_buffer *ucb;
1283 int dsize, esize;
Oleg Nesterovf42d24a2013-02-04 17:48:34 +01001284 int ret = 0;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301285
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001286
Oleg Nesterova932b732013-01-31 19:47:23 +01001287 tu = container_of(con, struct trace_uprobe, consumer);
Oleg Nesterov1b47aef2013-01-31 19:55:27 +01001288 tu->nhit++;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301289
Namhyung Kimb7e0bf32013-11-25 13:42:47 +09001290 udd.tu = tu;
1291 udd.bp_addr = instruction_pointer(regs);
1292
1293 current->utask->vaddr = (unsigned long) &udd;
1294
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001295 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1296 return 0;
1297
1298 dsize = __get_data_size(&tu->tp, regs);
1299 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1300
1301 ucb = uprobe_buffer_get();
Masami Hiramatsu91784122018-04-25 21:19:01 +09001302 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001303
Namhyung Kim14577c32013-07-03 15:42:53 +09001304 if (tu->tp.flags & TP_FLAG_TRACE)
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001305 ret |= uprobe_trace_func(tu, regs, ucb, dsize);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301306
1307#ifdef CONFIG_PERF_EVENTS
Namhyung Kim14577c32013-07-03 15:42:53 +09001308 if (tu->tp.flags & TP_FLAG_PROFILE)
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001309 ret |= uprobe_perf_func(tu, regs, ucb, dsize);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301310#endif
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001311 uprobe_buffer_put(ucb);
Oleg Nesterovf42d24a2013-02-04 17:48:34 +01001312 return ret;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301313}
1314
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001315static int uretprobe_dispatcher(struct uprobe_consumer *con,
1316 unsigned long func, struct pt_regs *regs)
1317{
1318 struct trace_uprobe *tu;
Namhyung Kimb7e0bf32013-11-25 13:42:47 +09001319 struct uprobe_dispatch_data udd;
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001320 struct uprobe_cpu_buffer *ucb;
1321 int dsize, esize;
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001322
1323 tu = container_of(con, struct trace_uprobe, consumer);
1324
Namhyung Kimb7e0bf32013-11-25 13:42:47 +09001325 udd.tu = tu;
1326 udd.bp_addr = func;
1327
1328 current->utask->vaddr = (unsigned long) &udd;
1329
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001330 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1331 return 0;
1332
1333 dsize = __get_data_size(&tu->tp, regs);
1334 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1335
1336 ucb = uprobe_buffer_get();
Masami Hiramatsu91784122018-04-25 21:19:01 +09001337 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001338
Namhyung Kim14577c32013-07-03 15:42:53 +09001339 if (tu->tp.flags & TP_FLAG_TRACE)
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001340 uretprobe_trace_func(tu, func, regs, ucb, dsize);
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001341
1342#ifdef CONFIG_PERF_EVENTS
Namhyung Kim14577c32013-07-03 15:42:53 +09001343 if (tu->tp.flags & TP_FLAG_PROFILE)
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001344 uretprobe_perf_func(tu, func, regs, ucb, dsize);
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001345#endif
Namhyung Kimdd9fa552014-01-17 17:08:37 +09001346 uprobe_buffer_put(ucb);
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +01001347 return 0;
1348}
1349
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301350static struct trace_event_functions uprobe_funcs = {
1351 .trace = print_uprobe_event
1352};
1353
Song Liu33ea4b22017-12-06 14:45:16 -08001354static inline void init_trace_event_call(struct trace_uprobe *tu,
1355 struct trace_event_call *call)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301356{
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301357 INIT_LIST_HEAD(&call->class->fields);
1358 call->event.funcs = &uprobe_funcs;
1359 call->class->define_fields = uprobe_event_define_fields;
1360
Song Liu33ea4b22017-12-06 14:45:16 -08001361 call->flags = TRACE_EVENT_FL_UPROBE;
1362 call->class->reg = trace_uprobe_register;
1363 call->data = tu;
1364}
1365
1366static int register_uprobe_event(struct trace_uprobe *tu)
1367{
1368 struct trace_event_call *call = &tu->tp.call;
1369 int ret = 0;
1370
1371 init_trace_event_call(tu, call);
1372
Masami Hiramatsu0a46c852018-04-25 21:19:30 +09001373 if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301374 return -ENOMEM;
1375
Steven Rostedt (Red Hat)9023c932015-05-05 09:39:12 -04001376 ret = register_trace_event(&call->event);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301377 if (!ret) {
1378 kfree(call->print_fmt);
1379 return -ENODEV;
1380 }
Oleg Nesterovede392a2014-07-15 20:48:24 +02001381
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301382 ret = trace_add_event_call(call);
1383
1384 if (ret) {
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -04001385 pr_info("Failed to register uprobe event: %s\n",
Steven Rostedt (Red Hat)687fcc42015-05-13 14:20:14 -04001386 trace_event_name(call));
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301387 kfree(call->print_fmt);
Steven Rostedt (Red Hat)9023c932015-05-05 09:39:12 -04001388 unregister_trace_event(&call->event);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301389 }
1390
1391 return ret;
1392}
1393
Steven Rostedt (Red Hat)c6c24012013-07-03 23:33:51 -04001394static int unregister_uprobe_event(struct trace_uprobe *tu)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301395{
Steven Rostedt (Red Hat)c6c24012013-07-03 23:33:51 -04001396 int ret;
1397
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301398 /* tu->event is unregistered in trace_remove_event_call() */
Namhyung Kim14577c32013-07-03 15:42:53 +09001399 ret = trace_remove_event_call(&tu->tp.call);
Steven Rostedt (Red Hat)c6c24012013-07-03 23:33:51 -04001400 if (ret)
1401 return ret;
Namhyung Kim14577c32013-07-03 15:42:53 +09001402 kfree(tu->tp.call.print_fmt);
1403 tu->tp.call.print_fmt = NULL;
Steven Rostedt (Red Hat)c6c24012013-07-03 23:33:51 -04001404 return 0;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301405}
1406
Song Liu33ea4b22017-12-06 14:45:16 -08001407#ifdef CONFIG_PERF_EVENTS
1408struct trace_event_call *
Song Liua6ca88b2018-10-01 22:36:36 -07001409create_local_trace_uprobe(char *name, unsigned long offs,
1410 unsigned long ref_ctr_offset, bool is_return)
Song Liu33ea4b22017-12-06 14:45:16 -08001411{
1412 struct trace_uprobe *tu;
Song Liu33ea4b22017-12-06 14:45:16 -08001413 struct path path;
1414 int ret;
1415
1416 ret = kern_path(name, LOOKUP_FOLLOW, &path);
1417 if (ret)
1418 return ERR_PTR(ret);
1419
Song Liu0c92c7a2018-04-23 10:21:34 -07001420 if (!d_is_reg(path.dentry)) {
1421 path_put(&path);
Song Liu33ea4b22017-12-06 14:45:16 -08001422 return ERR_PTR(-EINVAL);
1423 }
1424
	/*
	 * local trace_uprobes are not added to uprobe_list, so they are
	 * never searched in find_probe_event(). Therefore, there is no
	 * concern of duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);

	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		path_put(&path);
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->path = path;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->filename = kstrdup(name, GFP_KERNEL);
	init_trace_event_call(tu, &tu->tp.call);

	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return &tu->tp.call;
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}

void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = container_of(event_call, struct trace_uprobe, tp.call);

	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;

	free_trace_uprobe(tu);
}
#endif	/* CONFIG_PERF_EVENTS */

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);