blob: 5395d37e5e72d3d4bd662f2b7d3209aa2cd0b37a [file] [log] [blame]
/*
 * uprobes-based tracing events
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>

#include "trace_probe.h"

#define UPROBE_EVENT_SYSTEM "uprobes"

/*
 * On-buffer record layout for a uprobe event: the common trace header,
 * then one user-space address (plain probe: probed IP) or two (return
 * probe: called function and return site), then the fetched argument
 * payload appended by the handlers.
 */
struct uprobe_trace_entry_head {
	struct trace_entry ent;
	unsigned long vaddr[];
};

/* Size of the fixed part of an entry: header plus 1 or 2 vaddr slots. */
#define SIZEOF_TRACE_ENTRY(is_return) \
	(sizeof(struct uprobe_trace_entry_head) + \
	sizeof(unsigned long) * (is_return ? 2 : 1))

/* Start of the variable argument payload that follows the fixed part. */
#define DATAOF_TRACE_ENTRY(entry, is_return) \
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
42
/*
 * Per-probe perf filtering state: a count of system-wide perf events
 * plus a list of per-task events, guarded by rwlock.
 */
struct trace_uprobe_filter {
	rwlock_t rwlock;
	int nr_systemwide;
	struct list_head perf_events;
};
48
/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head list;			/* link in uprobe_list */
	struct trace_uprobe_filter filter;	/* perf event filter state */
	struct uprobe_consumer consumer;	/* registered with uprobe core */
	struct inode *inode;			/* target file, held via igrab() */
	char *filename;				/* user-supplied path, for listing */
	unsigned long offset;			/* probe offset within the file */
	unsigned long nhit;			/* hit counter (uprobe_profile) */
	struct trace_probe tp;			/* common probe state; tp.args is a
						 * flexible tail, so tp must be last */
};

/* Allocation size for a trace_uprobe carrying n fetch arguments. */
#define SIZEOF_TRACE_UPROBE(n)			\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))
66
static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

/* Protects uprobe_list and serializes probe (un)registration. */
static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +053076
/*
 * Address of the n-th word on the user stack relative to @addr, taking
 * the architecture's stack growth direction into account.
 */
#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	unsigned long delta = n * sizeof(long);

	return addr - delta;
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	unsigned long delta = n * sizeof(long);

	return addr + delta;
}
#endif
88
/*
 * Read the n-th word from the current task's user stack.
 * Returns 0 when the word cannot be copied from user space; note a 0
 * result is indistinguishable from a genuine 0 on the stack.
 */
static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}
101
/*
 * Uprobes-specific fetch functions
 */
/*
 * Generate FETCH_FUNC_NAME(stack, <type>): read the <offset>-th word of
 * the user stack and store it into *dest as <type>.  The slot index is
 * smuggled through the void *offset parameter.
 */
#define DEFINE_FETCH_stack(type) \
static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\
					 void *offset, void *dest) \
{ \
	*(type *)dest = (type)get_user_stack_nth(regs, \
				((unsigned long)offset)); \
}
DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string NULL
#define fetch_stack_string_size NULL
116
117
/* Fetch type information table */
/* Maps argument type names ("u32", "string", ...) to fetch/print ops. */
const struct fetch_type uprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8, u8, 0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8, u8, 1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),

	ASSIGN_FETCH_TYPE_END
};
137
/* Initialize a probe's perf filter to "no events attached". */
static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

/* True when no perf event (per-task or system-wide) uses this probe. */
static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}
149
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +0100150static inline bool is_ret_probe(struct trace_uprobe *tu)
151{
152 return tu->consumer.ret_handler != NULL;
153}
154
/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 * Returns the new probe or an ERR_PTR; the caller owns it and must
 * release it with free_trace_uprobe().
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	/* Both names become tracefs identifiers; validate them up front. */
	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	/* Tail allocation: tp.args[] gets room for nargs entries. */
	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	tu->tp.call.flags |= TRACE_EVENT_FL_USE_CALL_FILTER;
	return tu;

error:
	/* call.name may be NULL here; kfree(NULL) is a no-op. */
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}
196
/* Release everything alloc_trace_uprobe() and argument parsing took. */
static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	iput(tu->inode);	/* drop the igrab() reference */
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}
210
/* Look up a probe by group/event name; caller must hold uprobe_lock. */
static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(tu->tp.call.name, event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}
222
/* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	/* May fail (e.g. the event is busy); tu is left intact then. */
	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	list_del(&tu->list);
	free_trace_uprobe(tu);
	return 0;
}
236
/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_probe_event(tu->tp.call.name, tu->tp.call.class->system);
	if (old_tu) {
		/* delete old event: a same-named probe is replaced */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}
267
268/*
269 * Argument syntax:
Namhyung Kim306cfe22013-07-03 16:44:46 +0900270 * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530271 *
272 * - Remove uprobe: -:[GRP/]EVENT
273 */
274static int create_trace_uprobe(int argc, char **argv)
275{
276 struct trace_uprobe *tu;
277 struct inode *inode;
278 char *arg, *event, *group, *filename;
279 char buf[MAX_EVENT_NAME_LEN];
280 struct path path;
281 unsigned long offset;
Oleg Nesterov4ee5a522013-03-30 20:28:15 +0100282 bool is_delete, is_return;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530283 int i, ret;
284
285 inode = NULL;
286 ret = 0;
287 is_delete = false;
Oleg Nesterov4ee5a522013-03-30 20:28:15 +0100288 is_return = false;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530289 event = NULL;
290 group = NULL;
291
292 /* argc must be >= 1 */
293 if (argv[0][0] == '-')
294 is_delete = true;
Oleg Nesterov4ee5a522013-03-30 20:28:15 +0100295 else if (argv[0][0] == 'r')
296 is_return = true;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530297 else if (argv[0][0] != 'p') {
Oleg Nesterov4ee5a522013-03-30 20:28:15 +0100298 pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530299 return -EINVAL;
300 }
301
302 if (argv[0][1] == ':') {
303 event = &argv[0][2];
304 arg = strchr(event, '/');
305
306 if (arg) {
307 group = event;
308 event = arg + 1;
309 event[-1] = '\0';
310
311 if (strlen(group) == 0) {
312 pr_info("Group name is not specified\n");
313 return -EINVAL;
314 }
315 }
316 if (strlen(event) == 0) {
317 pr_info("Event name is not specified\n");
318 return -EINVAL;
319 }
320 }
321 if (!group)
322 group = UPROBE_EVENT_SYSTEM;
323
324 if (is_delete) {
Steven Rostedt (Red Hat)c6c24012013-07-03 23:33:51 -0400325 int ret;
326
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530327 if (!event) {
328 pr_info("Delete command needs an event name.\n");
329 return -EINVAL;
330 }
331 mutex_lock(&uprobe_lock);
332 tu = find_probe_event(event, group);
333
334 if (!tu) {
335 mutex_unlock(&uprobe_lock);
336 pr_info("Event %s/%s doesn't exist.\n", group, event);
337 return -ENOENT;
338 }
339 /* delete an event */
Steven Rostedt (Red Hat)c6c24012013-07-03 23:33:51 -0400340 ret = unregister_trace_uprobe(tu);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530341 mutex_unlock(&uprobe_lock);
Steven Rostedt (Red Hat)c6c24012013-07-03 23:33:51 -0400342 return ret;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530343 }
344
345 if (argc < 2) {
346 pr_info("Probe point is not specified.\n");
347 return -EINVAL;
348 }
349 if (isdigit(argv[1][0])) {
350 pr_info("probe point must be have a filename.\n");
351 return -EINVAL;
352 }
353 arg = strchr(argv[1], ':');
zhangwei(Jovi)fa440632013-06-13 14:21:51 +0800354 if (!arg) {
355 ret = -EINVAL;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530356 goto fail_address_parse;
zhangwei(Jovi)fa440632013-06-13 14:21:51 +0800357 }
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530358
359 *arg++ = '\0';
360 filename = argv[1];
361 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
362 if (ret)
363 goto fail_address_parse;
364
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530365 inode = igrab(path.dentry->d_inode);
Oleg Nesterov84d7ed72013-01-27 18:20:45 +0100366 path_put(&path);
367
Oleg Nesterov7e4e28c2013-01-28 17:08:47 +0100368 if (!inode || !S_ISREG(inode->i_mode)) {
Jovi Zhangd24d7db2012-07-18 18:16:44 +0800369 ret = -EINVAL;
370 goto fail_address_parse;
371 }
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530372
Oleg Nesterov84d7ed72013-01-27 18:20:45 +0100373 ret = kstrtoul(arg, 0, &offset);
374 if (ret)
375 goto fail_address_parse;
376
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530377 argc -= 2;
378 argv += 2;
379
380 /* setup a probe */
381 if (!event) {
Andy Shevchenkob2e902f2012-12-17 16:01:27 -0800382 char *tail;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530383 char *ptr;
384
Andy Shevchenkob2e902f2012-12-17 16:01:27 -0800385 tail = kstrdup(kbasename(filename), GFP_KERNEL);
386 if (!tail) {
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530387 ret = -ENOMEM;
388 goto fail_address_parse;
389 }
390
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530391 ptr = strpbrk(tail, ".-_");
392 if (ptr)
393 *ptr = '\0';
394
395 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
396 event = buf;
397 kfree(tail);
398 }
399
Oleg Nesterov4ee5a522013-03-30 20:28:15 +0100400 tu = alloc_trace_uprobe(group, event, argc, is_return);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530401 if (IS_ERR(tu)) {
402 pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
403 ret = PTR_ERR(tu);
404 goto fail_address_parse;
405 }
406 tu->offset = offset;
407 tu->inode = inode;
408 tu->filename = kstrdup(filename, GFP_KERNEL);
409
410 if (!tu->filename) {
411 pr_info("Failed to allocate filename.\n");
412 ret = -ENOMEM;
413 goto error;
414 }
415
416 /* parse arguments */
417 ret = 0;
418 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
Namhyung Kim14577c32013-07-03 15:42:53 +0900419 struct probe_arg *parg = &tu->tp.args[i];
420
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530421 /* Increment count for freeing args in error case */
Namhyung Kim14577c32013-07-03 15:42:53 +0900422 tu->tp.nr_args++;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530423
424 /* Parse argument name */
425 arg = strchr(argv[i], '=');
426 if (arg) {
427 *arg++ = '\0';
Namhyung Kim14577c32013-07-03 15:42:53 +0900428 parg->name = kstrdup(argv[i], GFP_KERNEL);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530429 } else {
430 arg = argv[i];
431 /* If argument name is omitted, set "argN" */
432 snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
Namhyung Kim14577c32013-07-03 15:42:53 +0900433 parg->name = kstrdup(buf, GFP_KERNEL);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530434 }
435
Namhyung Kim14577c32013-07-03 15:42:53 +0900436 if (!parg->name) {
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530437 pr_info("Failed to allocate argument[%d] name.\n", i);
438 ret = -ENOMEM;
439 goto error;
440 }
441
Namhyung Kim14577c32013-07-03 15:42:53 +0900442 if (!is_good_name(parg->name)) {
443 pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530444 ret = -EINVAL;
445 goto error;
446 }
447
Namhyung Kim14577c32013-07-03 15:42:53 +0900448 if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530449 pr_info("Argument[%d] name '%s' conflicts with "
450 "another field.\n", i, argv[i]);
451 ret = -EINVAL;
452 goto error;
453 }
454
455 /* Parse fetch argument */
Namhyung Kim14577c32013-07-03 15:42:53 +0900456 ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
457 false, false);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530458 if (ret) {
459 pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
460 goto error;
461 }
462 }
463
464 ret = register_trace_uprobe(tu);
465 if (ret)
466 goto error;
467 return 0;
468
469error:
470 free_trace_uprobe(tu);
471 return ret;
472
473fail_address_parse:
474 if (inode)
475 iput(inode);
476
Jovi Zhangd24d7db2012-07-18 18:16:44 +0800477 pr_info("Failed to parse address or file.\n");
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530478
479 return ret;
480}
481
/*
 * Remove every registered probe (used by O_TRUNC open of the events
 * file).  Stops at the first probe that refuses to unregister and
 * returns that error.
 */
static int cleanup_all_probes(void)
{
	struct trace_uprobe *tu;
	int ret = 0;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		ret = unregister_trace_uprobe(tu);
		if (ret)
			break;
	}
	mutex_unlock(&uprobe_lock);
	return ret;
}
497
/* Probes listing interfaces */
/* seq_file iteration over uprobe_list; uprobe_lock is held across the walk. */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);	/* taken in probes_seq_start() */
}
514
/*
 * Print one probe definition, in the same syntax that
 * create_trace_uprobe() accepts.
 */
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, tu->tp.call.name);
	seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_printf(m, "\n");
	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start = probes_seq_start,
	.next = probes_seq_next,
	.stop = probes_seq_stop,
	.show = probes_seq_show
};
537
/* Opening with O_TRUNC for writing means "delete all probes first". */
static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = cleanup_all_probes();
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

/* Each written line is handed to create_trace_uprobe() as one command. */
static ssize_t probes_write(struct file *file, const char __user *buffer,
			size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
}
556
/* File operations for the tracefs "uprobe_events" control file. */
static const struct file_operations uprobe_events_ops = {
	.owner = THIS_MODULE,
	.open = probes_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
	.write = probes_write,
};
565
/* Probes profiling interfaces */
/* One line per probe: filename, event name and hit count. */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, " %s %-44s %15lu\n", tu->filename, tu->tp.call.name, tu->nhit);
	return 0;
}

/* Reuses the listing iterator; only the show callback differs. */
static const struct seq_operations profile_seq_op = {
	.start = probes_seq_start,
	.next = probes_seq_next,
	.stop = probes_seq_stop,
	.show = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

/* File operations for the read-only "uprobe_profile" file. */
static const struct file_operations uprobe_profile_ops = {
	.owner = THIS_MODULE,
	.open = profile_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
594
/*
 * Write one event record into the ftrace ring buffer: the probed
 * address(es) followed by the fetched argument payload.  For a return
 * probe @func is the called function's address; otherwise it is unused.
 */
static void uprobe_trace_print(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, i;
	struct ftrace_event_call *call = &tu->tp.call;

	size = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
						  size + tu->tp.size, 0, 0);
	if (!event)
		return;		/* buffer full or tracing disabled */

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	/* Fetch each argument into its slot within the payload. */
	for (i = 0; i < tu->tp.nr_args; i++) {
		call_fetch(&tu->tp.args[i].fetch, regs,
			   data + tu->tp.args[i].offset);
	}

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs)
{
	/* A return probe records only from its ret handler below. */
	if (!is_ret_probe(tu))
		uprobe_trace_print(tu, 0, regs);
	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs)
{
	uprobe_trace_print(tu, func, regs);
}
643
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530644/* Event entry printers */
645static enum print_line_t
646print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
647{
Oleg Nesterov457d1772013-03-29 18:26:51 +0100648 struct uprobe_trace_entry_head *entry;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530649 struct trace_seq *s = &iter->seq;
650 struct trace_uprobe *tu;
651 u8 *data;
652 int i;
653
Oleg Nesterov457d1772013-03-29 18:26:51 +0100654 entry = (struct uprobe_trace_entry_head *)iter->ent;
Namhyung Kim14577c32013-07-03 15:42:53 +0900655 tu = container_of(event, struct trace_uprobe, tp.call.event);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530656
Oleg Nesterov3ede82d2013-03-30 19:48:09 +0100657 if (is_ret_probe(tu)) {
Namhyung Kim14577c32013-07-03 15:42:53 +0900658 if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", tu->tp.call.name,
Oleg Nesterov3ede82d2013-03-30 19:48:09 +0100659 entry->vaddr[1], entry->vaddr[0]))
660 goto partial;
661 data = DATAOF_TRACE_ENTRY(entry, true);
662 } else {
Namhyung Kim14577c32013-07-03 15:42:53 +0900663 if (!trace_seq_printf(s, "%s: (0x%lx)", tu->tp.call.name,
Oleg Nesterov3ede82d2013-03-30 19:48:09 +0100664 entry->vaddr[0]))
665 goto partial;
666 data = DATAOF_TRACE_ENTRY(entry, false);
667 }
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530668
Namhyung Kim14577c32013-07-03 15:42:53 +0900669 for (i = 0; i < tu->tp.nr_args; i++) {
670 struct probe_arg *parg = &tu->tp.args[i];
671
672 if (!parg->type->print(s, parg->name, data + parg->offset, entry))
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530673 goto partial;
674 }
675
676 if (trace_seq_puts(s, "\n"))
677 return TRACE_TYPE_HANDLED;
678
679partial:
680 return TRACE_TYPE_PARTIAL_LINE;
681}
682
typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

/*
 * Arm the probe: set the given flag bit (presumably a TP_FLAG_* value
 * from trace_probe.h — confirm at callers) and register the consumer
 * with the uprobe core.  Fails with -EINTR when already enabled.
 */
static int
probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter)
{
	int ret = 0;

	if (trace_probe_is_enabled(&tu->tp))
		return -EINTR;

	/* Filter list must be empty while the probe is disarmed. */
	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	tu->tp.flags |= flag;
	tu->consumer.filter = filter;
	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	if (ret)
		tu->tp.flags &= ~flag;	/* roll back on failure */

	return ret;
}
705
/* Disarm the probe and clear the given flag bit; no-op when disabled. */
static void probe_event_disable(struct trace_uprobe *tu, int flag)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	/* All perf events should already have been detached. */
	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->tp.flags &= ~flag;
}
716
/* Describe this event's record layout to the event/filter core. */
static int uprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	/* Fixed part: FUNC + RETIP for return probes, IP otherwise. */
	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}
	/* Set argument names as fields */
	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name, size + parg->offset,
					 parg->type->size, parg->type->is_signed,
					 FILTER_OTHER);

		if (ret)
			return ret;
	}
	return 0;
}
745
#ifdef CONFIG_PERF_EVENTS
/*
 * True if any attached perf event covers @mm: either a system-wide
 * event is active, or some per-task event targets a task using that
 * mm.  Caller holds filter->rwlock.
 */
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.tp_target->mm == mm)
			return true;
	}

	return false;
}

/* Does this probe's filter already cover the event's target task? */
static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm);
}
768
/*
 * Attach a perf event to this probe.  If the filter did not already
 * cover the event's target, ask the uprobe core to (re)apply the
 * breakpoints so the new target gets probed.
 */
static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.tp_target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	/* Only re-apply when the filter's coverage actually grew. */
	if (!done)
		uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);

	return 0;
}
798
/*
 * Detach a perf event from this probe; remove breakpoints from mms
 * that are no longer covered by the remaining events.
 */
static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.tp_target) {
		list_del(&event->hw.tp_list);
		/* Still covered if system-wide, target exiting, or another
		 * event targets the same mm. */
		done = tu->filter.nr_systemwide ||
			(event->hw.tp_target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	/* Only strip breakpoints when coverage actually shrank. */
	if (!done)
		uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}
820
/*
 * uprobe-core callback: should the breakpoint be installed into @mm?
 */
static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}
834
/*
 * Submit one sample to perf: address(es) plus fetched arguments, with
 * the total size padded to u64 alignment as perf requires.
 */
static void uprobe_perf_print(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, rctx, i;

	size = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = ALIGN(size + tu->tp.size + sizeof(u32), sizeof(u64)) - sizeof(u32);

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;	/* nobody listening on this CPU */

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		call_fetch(&parg->fetch, regs, data + parg->offset);
	}

	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
 out:
	preempt_enable();
}
875
876/* uprobe profile handler */
877static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
878{
879 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
880 return UPROBE_HANDLER_REMOVE;
881
Oleg Nesterov393a7362013-03-30 18:46:22 +0100882 if (!is_ret_probe(tu))
883 uprobe_perf_print(tu, 0, regs);
Oleg Nesterovf42d24a2013-02-04 17:48:34 +0100884 return 0;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530885}
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +0100886
/*
 * Perf handler for the return side of a uretprobe: forward the
 * function entry address @func together with the return-site regs
 * to uprobe_perf_print().
 */
static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs)
{
	uprobe_perf_print(tu, func, regs);
}
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530892#endif /* CONFIG_PERF_EVENTS */
893
/*
 * ftrace_event_call->class->reg() callback: dispatch enable/disable
 * requests from the trace and perf subsystems to this trace_uprobe.
 * @data carries the perf_event for the PERF_OPEN/PERF_CLOSE cases.
 */
static
int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data)
{
	struct trace_uprobe *tu = event->data;

	switch (type) {
	case TRACE_REG_REGISTER:
		/* ftrace path: no per-event consumer filter */
		return probe_event_enable(tu, TP_FLAG_TRACE, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, TP_FLAG_TRACE);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		/* perf path installs uprobe_perf_filter as the consumer filter */
		return probe_event_enable(tu, TP_FLAG_PROFILE, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, TP_FLAG_PROFILE);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
	return 0;	/* unreachable: every case above returns */
}
927
928static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
929{
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530930 struct trace_uprobe *tu;
Oleg Nesterovf42d24a2013-02-04 17:48:34 +0100931 int ret = 0;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530932
Oleg Nesterova932b732013-01-31 19:47:23 +0100933 tu = container_of(con, struct trace_uprobe, consumer);
Oleg Nesterov1b47aef2013-01-31 19:55:27 +0100934 tu->nhit++;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530935
Namhyung Kim14577c32013-07-03 15:42:53 +0900936 if (tu->tp.flags & TP_FLAG_TRACE)
Oleg Nesterovf42d24a2013-02-04 17:48:34 +0100937 ret |= uprobe_trace_func(tu, regs);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530938
939#ifdef CONFIG_PERF_EVENTS
Namhyung Kim14577c32013-07-03 15:42:53 +0900940 if (tu->tp.flags & TP_FLAG_PROFILE)
Oleg Nesterovf42d24a2013-02-04 17:48:34 +0100941 ret |= uprobe_perf_func(tu, regs);
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530942#endif
Oleg Nesterovf42d24a2013-02-04 17:48:34 +0100943 return ret;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530944}
945
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +0100946static int uretprobe_dispatcher(struct uprobe_consumer *con,
947 unsigned long func, struct pt_regs *regs)
948{
949 struct trace_uprobe *tu;
950
951 tu = container_of(con, struct trace_uprobe, consumer);
952
Namhyung Kim14577c32013-07-03 15:42:53 +0900953 if (tu->tp.flags & TP_FLAG_TRACE)
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +0100954 uretprobe_trace_func(tu, func, regs);
955
956#ifdef CONFIG_PERF_EVENTS
Namhyung Kim14577c32013-07-03 15:42:53 +0900957 if (tu->tp.flags & TP_FLAG_PROFILE)
Oleg Nesterovc1ae5c72013-03-30 18:25:23 +0100958 uretprobe_perf_func(tu, func, regs);
959#endif
960 return 0;
961}
962
/* output callbacks for uprobe events; .trace is the text formatter
 * used when the trace buffer is read */
static struct trace_event_functions uprobe_funcs = {
	.trace = print_uprobe_event
};
966
967static int register_uprobe_event(struct trace_uprobe *tu)
968{
Namhyung Kim14577c32013-07-03 15:42:53 +0900969 struct ftrace_event_call *call = &tu->tp.call;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530970 int ret;
971
972 /* Initialize ftrace_event_call */
973 INIT_LIST_HEAD(&call->class->fields);
974 call->event.funcs = &uprobe_funcs;
975 call->class->define_fields = uprobe_event_define_fields;
976
Namhyung Kim5bf652a2013-07-03 16:09:02 +0900977 if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +0530978 return -ENOMEM;
979
980 ret = register_ftrace_event(&call->event);
981 if (!ret) {
982 kfree(call->print_fmt);
983 return -ENODEV;
984 }
985 call->flags = 0;
986 call->class->reg = trace_uprobe_register;
987 call->data = tu;
988 ret = trace_add_event_call(call);
989
990 if (ret) {
991 pr_info("Failed to register uprobe event: %s\n", call->name);
992 kfree(call->print_fmt);
993 unregister_ftrace_event(&call->event);
994 }
995
996 return ret;
997}
998
Steven Rostedt (Red Hat)c6c24012013-07-03 23:33:51 -0400999static int unregister_uprobe_event(struct trace_uprobe *tu)
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301000{
Steven Rostedt (Red Hat)c6c24012013-07-03 23:33:51 -04001001 int ret;
1002
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301003 /* tu->event is unregistered in trace_remove_event_call() */
Namhyung Kim14577c32013-07-03 15:42:53 +09001004 ret = trace_remove_event_call(&tu->tp.call);
Steven Rostedt (Red Hat)c6c24012013-07-03 23:33:51 -04001005 if (ret)
1006 return ret;
Namhyung Kim14577c32013-07-03 15:42:53 +09001007 kfree(tu->tp.call.print_fmt);
1008 tu->tp.call.print_fmt = NULL;
Steven Rostedt (Red Hat)c6c24012013-07-03 23:33:51 -04001009 return 0;
Srikar Dronamrajuf3f096c2012-04-11 16:00:43 +05301010}
1011
1012/* Make a trace interface for controling probe points */
1013static __init int init_uprobe_trace(void)
1014{
1015 struct dentry *d_tracer;
1016
1017 d_tracer = tracing_init_dentry();
1018 if (!d_tracer)
1019 return 0;
1020
1021 trace_create_file("uprobe_events", 0644, d_tracer,
1022 NULL, &uprobe_events_ops);
1023 /* Profile interface */
1024 trace_create_file("uprobe_profile", 0444, d_tracer,
1025 NULL, &uprobe_profile_ops);
1026 return 0;
1027}
1028
/* create the control files once filesystem infrastructure is up */
fs_initcall(init_uprobe_trace);