/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/sections.h>
#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})
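
/*
 * Both macros evaluate to the condition's value (note the trailing ___r in
 * the statement expression), so they can be used inline, e.g.
 * "if (FTRACE_WARN_ON(!rec)) return;" (illustrative sketch, not a call site
 * in this file).
 */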

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#define ASSIGN_OPS_HASH(opsname, val)
#endif

static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

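/* Return true if @ops does PID filtering and its trace_array has a PID list set. */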
static bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
	struct trace_array *tr;

	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
		return false;

	tr = ops->private;

	return tr->function_pids != NULL;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

/*
 * Traverse the ftrace_global_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_notrace(list);		\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
	       unlikely((op) != &ftrace_list_end))
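
/*
 * Typical iteration idiom for the two macros above (illustrative sketch;
 * see ftrace_update_pid_func() below for a real use):
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		if (op->flags & FTRACE_OPS_FL_PID)
 *			do_something(op);
 *	} while_for_each_ftrace_op(op);
 */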

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	struct trace_array *tr = op->private;

	if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
		return;

	op->saved_func(ip, parent_ip, op, regs);
}

static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_sched(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);

/* Both enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;
static bool fgraph_graph_time = true;

#else
static inline void update_function_graph_func(void) { }
#endif


static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
	    FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
						lockdep_is_held(&ftrace_lock));

	/* If there's no ftrace_ops registered, just call the stub function */
	if (set_function_trace_op == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (rcu_dereference_protected(ftrace_ops_list->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

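/*
 * Remove @ops from @list; returns 0 on success, or -1 if @ops was not found.
 */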
static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}

static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t		disabled;
	struct hlist_head	*hash;
	struct ftrace_profile_page *pages;
	struct ftrace_profile_page *start;
	struct tracer_stat	stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

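/* Advance to the next used record, crossing into the next page as needed. */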
static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* without function graph, compare on hit counts */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		 "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		 "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = rec->time;
	do_div(avg, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only by 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide by 1000 again.
		 */
		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

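	/*
	 * Rough sizing example (assuming a 64-bit build with the graph
	 * tracer enabled): each record is about 48 bytes, so a 4K page
	 * holds roughly 85 records, and 20000 functions need on the
	 * order of 240 pages.
	 */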
	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated; this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	int index = trace->depth;

	function_profile_call(trace->func, 0, NULL, NULL);

	/* If function graph is shutting down, ret_stack can be NULL */
	if (!current->ret_stack)
		return 0;

	if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
		current->ret_stack[index].subtime = 0;

	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

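	/*
	 * With graph time accounting disabled, charge only the time spent
	 * in the function itself: subtract the accumulated child time
	 * (subtime) that profile_graph_entry() zeroed at function entry.
	 */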
	if (!fgraph_graph_time) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

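/*
 * Writing a boolean to the function_profile_enabled file toggles the
 * profiler, e.g. (typical tracefs mount point, may vary by system):
 *	echo 1 > /sys/kernel/tracing/function_profile_enabled
 */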
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		name = kasprintf(GFP_KERNEL, "function%d", cpu);
		if (!name) {
			/*
			 * The files created are permanent; if something
			 * happens we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = tracefs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_graph_active;
#else
# define ftrace_graph_active 0
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_func_probe {
	struct ftrace_probe_ops	*probe_ops;
	struct ftrace_ops	ops;
	struct trace_array	*tr;
	struct list_head	list;
	void			*data;
	int			ref;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
					  FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
	struct ftrace_ops *op = NULL;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_sched().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				preempt_enable_notrace();
				return op;
			}
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();

	return NULL;
}

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	return ftrace_ops_trampoline(addr) != NULL;
}

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static __always_inline unsigned long
ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
{
	if (hash->size_bits > 0)
		return hash_long(ip, hash->size_bits);

	return 0;
}

/* Only use this function if ftrace_hash_empty() has already been tested */
static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	key = ftrace_hash_key(hash, ip);
	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

/**
 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
 * @hash: The hash to look at
 * @ip: The instruction pointer to test
 *
 * Search a given @hash to see if a given instruction pointer (@ip)
 * exists in it.
 *
 * Returns the entry that holds the @ip if found. NULL otherwise.
 */
struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	if (ftrace_hash_empty(hash))
		return NULL;

	return __ftrace_lookup_ip(hash, ip);
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	key = ftrace_hash_key(hash, entry->ip);
	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

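/*
 * Unlike free_hash_entry(), this uses hlist_del_rcu() and does not free
 * the entry: concurrent RCU readers may still see it, so the caller is
 * responsible for freeing it after a grace period.
 */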
static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del_rcu(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
{
	list_del(&ftrace_mod->list);
	kfree(ftrace_mod->module);
	kfree(ftrace_mod->func);
	kfree(ftrace_mod);
}

static void clear_ftrace_mod_list(struct list_head *head)
{
	struct ftrace_mod_load *p, *n;

	/* stack tracer isn't supported yet */
	if (!head)
		return;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(p, n, head, list)
		free_ftrace_mod(p);
	mutex_unlock(&ftrace_lock);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}

static int ftrace_add_mod(struct trace_array *tr,
			  const char *func, const char *module,
			  int enable)
{
	struct ftrace_mod_load *ftrace_mod;
	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;

	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
	if (!ftrace_mod)
		return -ENOMEM;

	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
	ftrace_mod->enable = enable;

	if (!ftrace_mod->func || !ftrace_mod->module)
		goto out_free;

	list_add(&ftrace_mod->list, mod_head);

	return 0;

 out_free:
	free_ftrace_mod(ftrace_mod);

	return -ENOMEM;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	if (hash)
		new_hash->flags = hash->flags;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *new_hash;
	int size = src->count;
	int bits = 0;
	int i;

	/*
	 * If the new source is empty, just return the empty_hash.
	 */
	if (ftrace_hash_empty(src))
		return EMPTY_HASH;

	/*
	 * Make the number of buckets roughly match the number of
	 * entries found (rounded down to a power of two).
	 */
	for (size /= 2; size; size >>= 1)
		bits++;
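	/*
	 * Example: src->count == 10 gives bits = 3, i.e. 8 buckets;
	 * src->count == 16 gives bits = 4, i.e. 16 buckets.
	 */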

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return NULL;

	new_hash->flags = src->flags;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

	return new_hash;
}

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);
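
	/*
	 * The old *dst hash is not freed here: RCU readers may still be
	 * walking it. Callers are expected to hold on to it (see
	 * ops->old_hash in ftrace_run_modify_code() below) and release
	 * it with free_ftrace_hash_rcu() once the update is complete.
	 */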

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}

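/*
 * Example: with filter_hash = { foo, bar } and notrace_hash = { bar },
 * hash_contains_ip() is true only for 'foo'. With an empty filter_hash,
 * every ip matches unless it is in notrace_hash.
 */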
static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		!__ftrace_lookup_ip(hash->notrace_hash, ip));
}

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs may be called without them. We cannot
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}

/*
 * This is a nested (double) for loop. Do not use 'break' to break out
 * of the loop; you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
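
/*
 * Typical usage:
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_DISABLED)
 *			continue;
 *		...
 *	} while_for_each_ftrace_rec();
 */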

static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}
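
/*
 * Note the asymmetric comparison above: the 'key' built by
 * ftrace_location_range() overloads ->ip as the start of the range
 * and ->flags as its inclusive end, so bsearch() matches any record
 * whose MCOUNT_INSN_SIZE-wide instruction overlaps the range.
 */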

/**
 * ftrace_location_range - return the first address of a traced location
 *	if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *	to check.
 *
 * Returns rec->ip if the related ftrace location is at least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or call to the function tracer. It checks the ftrace
 * internal tables to determine if the address belongs or not.
 */
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			return rec->ip;
	}

	return 0;
}

/**
 * ftrace_location - return true if the given ip is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if the given @ip is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
	return ftrace_location_range(ip, ip);
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if the range @start to @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}

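/*
 * __ftrace_hash_rec_update() adjusts the reference count kept in the
 * low bits of rec->flags (read back with ftrace_rec_count()) for every
 * function matched by the ops' hashes, and returns true if at least
 * one call site actually needs its code modified.
 */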
static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = false;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   is inverted.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = true;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return false;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return false;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return false;
			rec->flags--;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * If the rec had TRAMP enabled, then it needs to
			 * be cleared. TRAMP can only be enabled if
			 * there is only a single ops attached to it.
			 * In other words, always disable it on decrementing.
			 * In the future, we may set it if rec count is
			 * decremented to one, and the ops that is left
			 * has a trampoline.
			 */
			rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}
		count++;

		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
		update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE;

		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return update;
	} while_for_each_ftrace_rec();

	return update;
}

static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}

/*
 * Try to update the IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or no update is needed, -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash have the following meanings:
 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 * - If the hash is EMPTY_HASH, it hits nothing
 * - Anything else hits the recs which match the hash entries.
 */
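/*
 * (An IPMODIFY ops - live patching is the typical user - changes the
 * instruction pointer from its handler to redirect execution, which
 * is why at most one such ops may claim any given function at a time.)
 */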
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
					 struct ftrace_hash *old_hash,
					 struct ftrace_hash *new_hash)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec, *end = NULL;
	int in_old, in_new;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
		return 0;

	/*
	 * Since IPMODIFY is a very address sensitive action, we do not
	 * allow ftrace_ops to set all functions to a new hash.
	 */
	if (!new_hash || !old_hash)
		return -EINVAL;

	/* Update rec->flags */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		/* We need to update only differences of filter_hash */
		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new) {
			/* New entries must ensure no others are using it */
			if (rec->flags & FTRACE_FL_IPMODIFY)
				goto rollback;
			rec->flags |= FTRACE_FL_IPMODIFY;
		} else /* Removed entry */
			rec->flags &= ~FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

	return 0;

rollback:
	end = rec;

	/* Roll back what we did above */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (rec == end)
			goto err_out;

		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new)
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		else
			rec->flags |= FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

err_out:
	return -EBUSY;
}

static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
}

/* Disabling always succeeds */
static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
}

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash)
{
	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(old_hash))
		old_hash = NULL;

	if (ftrace_hash_empty(new_hash))
		new_hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
}

static void print_ip_ins(const char *fmt, const unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);

enum ftrace_bug_type ftrace_bug_type;
const void *ftrace_expected;

static void print_bug_type(void)
{
	switch (ftrace_bug_type) {
	case FTRACE_BUG_UNKNOWN:
		break;
	case FTRACE_BUG_INIT:
		pr_info("Initializing ftrace call sites\n");
		break;
	case FTRACE_BUG_NOP:
		pr_info("Setting ftrace call site to NOP\n");
		break;
	case FTRACE_BUG_CALL:
		pr_info("Setting ftrace call site to call ftrace function\n");
		break;
	case FTRACE_BUG_UPDATE:
		pr_info("Updating ftrace call site to call a different ftrace function\n");
		break;
	}
}

/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @rec: The record that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, struct dyn_ftrace *rec)
{
	unsigned long ip = rec ? rec->ip : 0;

	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual:   ", (unsigned char *)ip);
		pr_cont("\n");
		if (ftrace_expected) {
			print_ip_ins(" expected: ", ftrace_expected);
			pr_cont("\n");
		}
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
	print_bug_type();
	if (rec) {
		struct ftrace_ops *ops = NULL;

		pr_info("ftrace record flags: %lx\n", rec->flags);
		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			ops = ftrace_find_tramp_ops_any(rec);
			if (ops) {
				do {
					pr_cont("\ttramp: %pS (%pS)",
						(void *)ops->trampoline,
						(void *)ops->func);
					ops = ftrace_find_tramp_ops_next(rec, ops);
				} while (ops);
			} else
				pr_cont("\ttramp: ERROR!");

		}
		ip = ftrace_get_addr_curr(rec);
		pr_cont("\n expected tramp: %lx\n", ip);
	}
}

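/*
 * ftrace_check_record() decides what (if anything) must change at a
 * call site: FTRACE_UPDATE_IGNORE (nothing to do), FTRACE_UPDATE_MAKE_CALL
 * (nop -> call), FTRACE_UPDATE_MAKE_NOP (call -> nop), or
 * FTRACE_UPDATE_MODIFY_CALL (switch between the regs-saving, normal or
 * trampoline variants). If @update is set, the new state is also
 * committed into rec->flags.
 */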
static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
{
	unsigned long flag = 0UL;

	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

	if (rec->flags & FTRACE_FL_DISABLED)
		return FTRACE_UPDATE_IGNORE;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure it's disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (enable && ftrace_rec_count(rec))
		flag = FTRACE_FL_ENABLED;

	/*
	 * If enabling and the REGS flag does not match the REGS_EN, or
	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
	 * this record. Set flags to fail the compare against ENABLED.
	 */
	if (flag) {
		if (!(rec->flags & FTRACE_FL_REGS) !=
		    !(rec->flags & FTRACE_FL_REGS_EN))
			flag |= FTRACE_FL_REGS;

		if (!(rec->flags & FTRACE_FL_TRAMP) !=
		    !(rec->flags & FTRACE_FL_TRAMP_EN))
			flag |= FTRACE_FL_TRAMP;
	}

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return FTRACE_UPDATE_IGNORE;

	if (flag) {
		/* Save off if rec is being enabled (for return value) */
		flag ^= rec->flags & FTRACE_FL_ENABLED;

		if (update) {
			rec->flags |= FTRACE_FL_ENABLED;
			if (flag & FTRACE_FL_REGS) {
				if (rec->flags & FTRACE_FL_REGS)
					rec->flags |= FTRACE_FL_REGS_EN;
				else
					rec->flags &= ~FTRACE_FL_REGS_EN;
			}
			if (flag & FTRACE_FL_TRAMP) {
				if (rec->flags & FTRACE_FL_TRAMP)
					rec->flags |= FTRACE_FL_TRAMP_EN;
				else
					rec->flags &= ~FTRACE_FL_TRAMP_EN;
			}
		}

		/*
		 * If this record is being updated from a nop, then
		 *   return UPDATE_MAKE_CALL.
		 * Otherwise,
		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
		 *   from the save regs, to a non-save regs function or
		 *   vice versa, or from a trampoline call.
		 */
		if (flag & FTRACE_FL_ENABLED) {
			ftrace_bug_type = FTRACE_BUG_CALL;
			return FTRACE_UPDATE_MAKE_CALL;
		}

		ftrace_bug_type = FTRACE_BUG_UPDATE;
		return FTRACE_UPDATE_MODIFY_CALL;
	}

	if (update) {
		/* If there are no more users, clear all flags */
		if (!ftrace_rec_count(rec))
			rec->flags = 0;
		else
			/*
			 * Just disable the record, but keep the ops TRAMP
			 * and REGS states. The _EN flags must be disabled though.
			 */
			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
					FTRACE_FL_REGS_EN);
	}

	ftrace_bug_type = FTRACE_BUG_NOP;
	return FTRACE_UPDATE_MAKE_NOP;
}

/**
 * ftrace_update_record - set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to 1 if the record is tracing, zero to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 */
int ftrace_update_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 1);
}

/**
 * ftrace_test_record - check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to 1 to check if enabled, 0 if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 */
int ftrace_test_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 0);
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
			   struct ftrace_ops *op)
{
	unsigned long ip = rec->ip;

	while_for_each_ftrace_op(op) {

		if (!op->trampoline)
			continue;

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	}

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	/*
	 * Need to check removed ops first.
	 * If they are being removed, and this rec has a tramp,
	 * and this rec is in the ops list, then it would be the
	 * one with the tramp.
	 */
	if (removed_ops) {
		if (hash_contains_ip(ip, &removed_ops->old_hash))
			return removed_ops;
	}

	/*
	 * Need to find the current trampoline for a rec.
	 * Now, a trampoline is only attached to a rec if there
	 * was a single 'ops' attached to it. But this can be called
	 * when we are adding another op to the rec or removing the
	 * current one. Thus, if the op is being added, we can
	 * ignore it because it hasn't attached itself to the rec
	 * yet.
	 *
	 * If an ops is being modified (hooking to different functions)
	 * then we don't care about the new functions that are being
	 * added, just the old ones (that are probably being removed).
	 *
	 * If we are adding an ops to a function that already is using
	 * a trampoline, it needs to be removed (trampolines are only
	 * for single ops connected), then an ops that is not being
	 * modified also needs to be checked.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

		/*
		 * If the ops is being added, it hasn't gotten to
		 * the point to be removed from this tree yet.
		 */
		if (op->flags & FTRACE_OPS_FL_ADDING)
			continue;

		/*
		 * If the ops is being modified and is in the old
		 * hash, then it is probably being removed from this
		 * function.
		 */
		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, &op->old_hash))
			return op;
		/*
		 * If the ops is not being added or modified, and it's
		 * in its normal filter hash, then this must be the one
		 * we want!
		 */
		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, op->func_hash))
			return op;

	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

/**
 * ftrace_get_addr_new - Get the call address to set to
 * @rec:  The ftrace record descriptor
 *
 * If the record has the FTRACE_FL_REGS set, that means that it
 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 * is not set, then it wants to convert to the normal callback.
 *
 * Returns the address of the trampoline to set to
 */
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP) {
		ops = ftrace_find_tramp_ops_new(rec);
		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
				(void *)rec->ip, (void *)rec->ip, rec->flags);
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

	if (rec->flags & FTRACE_FL_REGS)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}

/**
 * ftrace_get_addr_curr - Get the call address that is already there
 * @rec:  The ftrace record descriptor
 *
 * The FTRACE_FL_REGS_EN is set when the record already points to
 * a function that saves all the regs. Basically the '_EN' version
 * represents the current state of the function.
 *
 * Returns the address of the trampoline that is currently being called
 */
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;

	/* Trampolines take precedence over regs */
	if (rec->flags & FTRACE_FL_TRAMP_EN) {
		ops = ftrace_find_tramp_ops_curr(rec);
		if (FTRACE_WARN_ON(!ops)) {
			pr_warn("Bad trampoline accounting at: %p (%pS)\n",
				(void *)rec->ip, (void *)rec->ip);
			/* Ftrace is shutting down, return anything */
			return (unsigned long)FTRACE_ADDR;
		}
		return ops->trampoline;
	}

	if (rec->flags & FTRACE_FL_REGS_EN)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_old_addr;
	unsigned long ftrace_addr;
	int ret;

	ftrace_addr = ftrace_get_addr_new(rec);

	/* This needs to be done before we call ftrace_update_record */
	ftrace_old_addr = ftrace_get_addr_curr(rec);

	ret = ftrace_update_record(rec, enable);

	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		ftrace_bug_type = FTRACE_BUG_CALL;
		return ftrace_make_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		ftrace_bug_type = FTRACE_BUG_NOP;
		return ftrace_make_nop(NULL, rec, ftrace_old_addr);

	case FTRACE_UPDATE_MODIFY_CALL:
		ftrace_bug_type = FTRACE_BUG_UPDATE;
		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
	}

	return -1; /* unknown ftrace bug */
}

void __weak ftrace_replace_code(int enable)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	if (unlikely(ftrace_disabled))
		return;

	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			ftrace_bug(failed, rec);
			/* Stop processing */
			return;
		}
	} while_for_each_ftrace_rec();
}

struct ftrace_rec_iter {
	struct ftrace_page	*pg;
	int			index;
};

/**
 * ftrace_rec_iter_start - start up iterating over traced functions
 *
 * Returns an iterator handle that is used to iterate over all
 * the records that represent address locations where functions
 * are traced.
 *
 * May return NULL if no records are available.
 */
struct ftrace_rec_iter *ftrace_rec_iter_start(void)
{
	/*
	 * We only use a single iterator.
	 * Protected by the ftrace_lock mutex.
	 */
	static struct ftrace_rec_iter ftrace_rec_iter;
	struct ftrace_rec_iter *iter = &ftrace_rec_iter;

	iter->pg = ftrace_pages_start;
	iter->index = 0;

	/* Could have empty pages */
	while (iter->pg && !iter->pg->index)
		iter->pg = iter->pg->next;

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_next - get the next record to process.
 * @iter: The handle to the iterator.
 *
 * Returns the next iterator after the given iterator @iter.
 */
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
{
	iter->index++;

	if (iter->index >= iter->pg->index) {
		iter->pg = iter->pg->next;
		iter->index = 0;

		/* Could have empty pages */
		while (iter->pg && !iter->pg->index)
			iter->pg = iter->pg->next;
	}

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_record - get the record at the iterator location
 * @iter: The current iterator location
 *
 * Returns the record that the current @iter is at.
 */
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
{
	return &iter->pg->records[iter->index];
}
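
/*
 * Arch code typically walks every record with:
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		...
 *	}
 */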

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return 0;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug_type = FTRACE_BUG_INIT;
		ftrace_bug(ret, rec);
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the code modification is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the code modification is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

void ftrace_modify_all_code(int command)
{
	int update = command & FTRACE_UPDATE_TRACE_FUNC;
	int err = 0;

	/*
	 * If the ftrace_caller calls a ftrace_ops func directly,
	 * we need to make sure that it only traces functions it
	 * expects to trace. When doing the switch of functions,
	 * we need to update to the ftrace_ops_list_func first
	 * before the transition between old and new calls are set,
	 * as the ftrace_ops_list_func will check the ops hashes
	 * to make sure the ops are having the right functions
	 * traced.
	 */
	if (update) {
		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (update && ftrace_trace_function != ftrace_ops_list_func) {
		function_trace_op = set_function_trace_op;
		smp_wmb();
		/* If irqs are disabled, we are in stop machine */
		if (!irqs_disabled())
			smp_call_function(ftrace_sync_ipi, NULL, 1);
		err = ftrace_update_ftrace_func(ftrace_trace_function);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_START_FUNC_RET)
		err = ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		err = ftrace_disable_ftrace_graph_caller();
	FTRACE_WARN_ON(err);
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}

/**
 * ftrace_run_stop_machine - go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to fall back to the stop machine method, then
 * it can call this function.
 */
void ftrace_run_stop_machine(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

/**
 * arch_ftrace_update_code - modify the code to trace or not trace
 * @command: The command that needs to be done
 *
 * Archs can override this function if they do not need to
 * run stop_machine() to modify code.
 */
void __weak arch_ftrace_update_code(int command)
{
	ftrace_run_stop_machine(command);
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do whatever they want as long as it
	 * is safe. The stop_machine() is the safest, but also
	 * produces the most overhead.
	 */
	arch_ftrace_update_code(command);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}

static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
				   struct ftrace_ops_hash *old_hash)
{
	ops->flags |= FTRACE_OPS_FL_MODIFYING;
	ops->old_hash.filter_hash = old_hash->filter_hash;
	ops->old_hash.notrace_hash = old_hash->notrace_hash;
	ftrace_run_update_code(command);
	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;
	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
}

static ftrace_func_t saved_ftrace_func;
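/* Nesting counter: bumped by ftrace_startup(), dropped by ftrace_shutdown() */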
static int ftrace_start_up;

void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
}
2663
Steven Rostedtdf4fc312008-11-26 00:16:23 -05002664static void ftrace_startup_enable(int command)
2665{
2666 if (saved_ftrace_func != ftrace_trace_function) {
2667 saved_ftrace_func = ftrace_trace_function;
2668 command |= FTRACE_UPDATE_TRACE_FUNC;
2669 }
2670
2671 if (!command || !ftrace_enabled)
2672 return;
2673
2674 ftrace_run_update_code(command);
2675}
Steven Rostedtd61f82d2008-05-12 21:20:43 +02002676
Steven Rostedt (Red Hat)e1effa02014-08-05 17:19:38 -04002677static void ftrace_startup_all(int command)
2678{
2679 update_all_ops = true;
2680 ftrace_startup_enable(command);
2681 update_all_ops = false;
2682}
2683
static int ftrace_startup(struct ftrace_ops *ops, int command)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __register_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up++;

	/*
	 * Note that ftrace probes use this to start up
	 * and modify functions they will probe. But we still
	 * set the ADDING flag for modification, as probes
	 * do not have trampolines. If they add them in the
	 * future, then the probes will need to distinguish
	 * between adding and updating probes.
	 */
	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;

	ret = ftrace_hash_ipmodify_enable(ops);
	if (ret < 0) {
		/* Rollback registration process */
		__unregister_ftrace_function(ops);
		ftrace_start_up--;
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
		return ret;
	}

	if (ftrace_hash_rec_enable(ops, 1))
		command |= FTRACE_UPDATE_CALLS;

	ftrace_startup_enable(command);

	ops->flags &= ~FTRACE_OPS_FL_ADDING;

	return 0;
}
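
/*
 * For context, a minimal sketch of the registration path that lands
 * here (illustrative only; the callback and ops names are made up):
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *	};
 *
 * register_ftrace_function(&my_ops) takes ftrace_lock and calls
 * ftrace_startup(&my_ops, 0), which patches the call sites as needed.
 */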

static int ftrace_shutdown(struct ftrace_ops *ops, int command)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __unregister_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up--;
	/*
	 * Just warn in case of imbalance, no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may never be nopped again after
	 * further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	/* Disabling ipmodify never fails */
	ftrace_hash_ipmodify_disable(ops);

	if (ftrace_hash_rec_disable(ops, 1))
		command |= FTRACE_UPDATE_CALLS;

	ops->flags &= ~FTRACE_OPS_FL_ENABLED;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled) {
		/*
		 * If these are dynamic or per_cpu ops, they still
		 * need their data freed. Since function tracing is
		 * not currently active, we can just free them
		 * without synchronizing all CPUs.
		 */
		if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
			goto free_ops;

		return 0;
	}

	/*
	 * If the ops uses a trampoline, then it needs to be
	 * tested first on update.
	 */
	ops->flags |= FTRACE_OPS_FL_REMOVING;
	removed_ops = ops;

	/* The trampoline logic checks the old hashes */
	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;

	ftrace_run_update_code(command);

	/*
	 * If there are no more ops registered with ftrace, run a
	 * sanity check to make sure all rec flags are cleared.
	 */
	if (rcu_dereference_protected(ftrace_ops_list,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		struct ftrace_page *pg;
		struct dyn_ftrace *rec;

		do_for_each_ftrace_rec(pg, rec) {
			if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
				pr_warn(" %pS flags:%lx\n",
					(void *)rec->ip, rec->flags);
		} while_for_each_ftrace_rec();
	}

	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;

	removed_ops = NULL;
	ops->flags &= ~FTRACE_OPS_FL_REMOVING;

	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 * The same goes for freeing the per_cpu data of the per_cpu
	 * ops.
	 */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
		/*
		 * We need to do a hard force of sched synchronization.
		 * This is because we use preempt_disable() to do RCU, but
		 * the function tracers can be called where RCU is not watching
		 * (like before user_exit()). We can not rely on the RCU
		 * infrastructure to do the synchronization, thus we must do it
		 * ourselves.
		 */
		schedule_on_each_cpu(ftrace_sync);

		/*
		 * When the kernel is preemptive, tasks can be preempted
		 * while on an ftrace trampoline. Just scheduling a task on
		 * a CPU is not good enough to flush them. Calling
		 * synchronize_rcu_tasks() will wait for those tasks to
		 * execute and either schedule voluntarily or enter user space.
		 */
		if (IS_ENABLED(CONFIG_PREEMPT))
			synchronize_rcu_tasks();

 free_ops:
		arch_ftrace_trampoline_free(ops);
	}

	return 0;
}
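
/*
 * The mirror of the registration sketch above (illustrative only):
 * unregister_ftrace_function(&my_ops) calls ftrace_shutdown(&my_ops, 0),
 * which nops the call sites again and, for an FTRACE_OPS_FL_DYNAMIC ops,
 * waits out all in-flight callers and preempted trampoline users before
 * the ops and its trampoline may be freed.
 */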

static void ftrace_startup_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up) {
		command = FTRACE_UPDATE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_START_FUNC_RET;
		ftrace_startup_enable(command);
	}
}

static void ftrace_shutdown_sysctl(void)
{
	int command;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up) {
		command = FTRACE_DISABLE_CALLS;
		if (ftrace_graph_active)
			command |= FTRACE_STOP_FUNC_RET;
		ftrace_run_update_code(command);
	}
}

static u64		ftrace_update_time;
unsigned long		ftrace_update_tot_cnt;

static inline int ops_traces_mod(struct ftrace_ops *ops)
{
	/*
	 * An empty filter_hash defaults to tracing the whole module.
	 * But a notrace hash requires testing the individual module
	 * functions.
	 */
	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
		ftrace_hash_empty(ops->func_hash->notrace_hash);
}

/*
 * Check if the current ops references the record.
 *
 * If the ops traces all functions, then it was already accounted for.
 * If the ops does not trace the current record function, skip it.
 * If the ops ignores the function via notrace filter, skip it.
 */
static inline bool
ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	/* If ops isn't enabled, ignore it */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/* If ops traces all then it includes this function */
	if (ops_traces_mod(ops))
		return true;

	/* The function must be in the filter */
	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
	    !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
		return false;

	/* If in notrace hash, we ignore it too */
	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
		return false;

	return true;
}

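/*
 * A worked example of the checks above (hypothetical filter contents):
 * with filter_hash = { kmalloc, kfree } and notrace_hash = { kfree },
 * ops_references_rec() returns true only for the kmalloc record. The
 * kfree record passes the filter test but is excluded by the notrace
 * hash, and every other record fails the filter test. With both hashes
 * empty, all records match.
 */
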
static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *p;
	u64 start, stop;
	unsigned long update_cnt = 0;
	unsigned long rec_flags = 0;
	int i;

	start = ftrace_now(raw_smp_processor_id());

	/*
	 * When a module is loaded, this function is called to convert
	 * the calls to mcount in its text to nops, and also to create
	 * an entry in the ftrace data. Now, if ftrace is activated
	 * after this call, but before the module sets its text to
	 * read-only, the modification of enabling ftrace can fail if
	 * the read-only is done while ftrace is converting the calls.
	 * To prevent this, the module's records are set as disabled
	 * and will be enabled after the call to set the module's text
	 * to read-only.
	 */
	if (mod)
		rec_flags |= FTRACE_FL_DISABLED;

	for (pg = new_pgs; pg; pg = pg->next) {

		for (i = 0; i < pg->index; i++) {

			/* If something went wrong, bail without enabling anything */
			if (unlikely(ftrace_disabled))
				return -1;

			p = &pg->records[i];
			p->flags = rec_flags;

#ifndef CC_USING_NOP_MCOUNT
			/*
			 * Do the initial record conversion from mcount jump
			 * to the NOP instructions.
			 */
			if (!ftrace_code_disable(mod, p))
				break;
#endif

			update_cnt++;
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += update_cnt;

	return 0;
}

static int ftrace_allocate_records(struct ftrace_page *pg, int count)
{
	int order;
	int cnt;

	if (WARN_ON(!count))
		return -EINVAL;

	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));

	/*
	 * We want to fill as much as possible. No more than a page
	 * may be empty.
	 */
	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
		order--;

 again:
	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!pg->records) {
		/* if we can't allocate this size, try something smaller */
		if (!order)
			return -ENOMEM;
		order >>= 1;
		goto again;
	}

	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
	pg->size = cnt;

	if (cnt > count)
		cnt = count;

	return cnt;
}

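/*
 * A worked example of the sizing above, assuming 4K pages and a
 * 32-byte ENTRY_SIZE (both assumptions; the real values are arch and
 * config dependent): ENTRIES_PER_PAGE = 4096 / 32 = 128. For
 * count = 200, DIV_ROUND_UP(200, 128) = 2 and get_count_order(2) = 1,
 * i.e. an 8K block holding 256 records. The while loop then tests
 * 256 >= 200 + 128, which fails, so order stays 1 and less than one
 * page's worth of slots (here 56) goes unused.
 */
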
static struct ftrace_page *
ftrace_allocate_pages(unsigned long num_to_init)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	int order;
	int cnt;

	if (!num_to_init)
		return NULL;

	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
	if (!pg)
		return NULL;

	/*
	 * Try to allocate as much as possible in one contiguous
	 * location that fills in all of the space. We want to
	 * waste as little space as possible.
	 */
	for (;;) {
		cnt = ftrace_allocate_records(pg, num_to_init);
		if (cnt < 0)
			goto free_pages;

		num_to_init -= cnt;
		if (!num_to_init)
			break;

		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
		if (!pg->next)
			goto free_pages;

		pg = pg->next;
	}

	return start_pg;

 free_pages:
	pg = start_pg;
	while (pg) {
		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
		free_pages((unsigned long)pg->records, order);
		start_pg = pg->next;
		kfree(pg);
		pg = start_pg;
	}
	pr_info("ftrace: FAILED to allocate memory for functions\n");
	return NULL;
}

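/*
 * E.g. for the tens of thousands of records a typical vmlinux has,
 * the loop above chains a handful of ftrace_page blocks, each sized by
 * ftrace_allocate_records() to the largest order that does not waste
 * more than a page.
 */
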
#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t				pos;
	loff_t				func_pos;
	loff_t				mod_pos;
	struct ftrace_page		*pg;
	struct dyn_ftrace		*func;
	struct ftrace_func_probe	*probe;
	struct ftrace_func_entry	*probe_entry;
	struct trace_parser		parser;
	struct ftrace_hash		*hash;
	struct ftrace_ops		*ops;
	struct trace_array		*tr;
	struct list_head		*mod_list;
	int				pidx;
	int				idx;
	unsigned			flags;
};

static void *
t_probe_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct trace_array *tr = iter->ops->private;
	struct list_head *func_probes;
	struct ftrace_hash *hash;
	struct list_head *next;
	struct hlist_node *hnd = NULL;
	struct hlist_head *hhd;
	int size;

	(*pos)++;
	iter->pos = *pos;

	if (!tr)
		return NULL;

	func_probes = &tr->func_probes;
	if (list_empty(func_probes))
		return NULL;

	if (!iter->probe) {
		next = func_probes->next;
		iter->probe = list_entry(next, struct ftrace_func_probe, list);
	}

	if (iter->probe_entry)
		hnd = &iter->probe_entry->hlist;

	hash = iter->probe->ops.func_hash->filter_hash;
	size = 1 << hash->size_bits;

 retry:
	if (iter->pidx >= size) {
		if (iter->probe->list.next == func_probes)
			return NULL;
		next = iter->probe->list.next;
		iter->probe = list_entry(next, struct ftrace_func_probe, list);
		hash = iter->probe->ops.func_hash->filter_hash;
		size = 1 << hash->size_bits;
		iter->pidx = 0;
	}

	hhd = &hash->buckets[iter->pidx];

	if (hlist_empty(hhd)) {
		iter->pidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->pidx++;
			goto retry;
		}
	}

	if (WARN_ON_ONCE(!hnd))
		return NULL;

	iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);

	return iter;
}

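/*
 * In short, t_probe_next() walks a two-level structure: the list of
 * registered probes and, within each probe, the buckets of its
 * filter_hash. iter->pidx remembers the current bucket and
 * iter->probe_entry the position within that bucket's hlist.
 */
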
static void *t_probe_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	if (!(iter->flags & FTRACE_ITER_DO_PROBES))
		return NULL;

	if (iter->mod_pos > *pos)
		return NULL;

	iter->probe = NULL;
	iter->probe_entry = NULL;
	iter->pidx = 0;
	for (l = 0; l <= (*pos - iter->mod_pos); ) {
		p = t_probe_next(m, &l);
		if (!p)
			break;
	}
	if (!p)
		return NULL;

	/* Only set this if we have an item */
	iter->flags |= FTRACE_ITER_PROBE;

	return iter;
}

static int
t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
{
	struct ftrace_func_entry *probe_entry;
	struct ftrace_probe_ops *probe_ops;
	struct ftrace_func_probe *probe;

	probe = iter->probe;
	probe_entry = iter->probe_entry;

	if (WARN_ON_ONCE(!probe || !probe_entry))
		return -EIO;

	probe_ops = probe->probe_ops;

	if (probe_ops->print)
		return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);

	seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
		   (void *)probe_ops->func);

	return 0;
}

static void *
t_mod_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	(*pos)++;
	iter->pos = *pos;

	iter->mod_list = iter->mod_list->next;

	if (iter->mod_list == &tr->mod_trace ||
	    iter->mod_list == &tr->mod_notrace) {
		iter->flags &= ~FTRACE_ITER_MOD;
		return NULL;
	}

	iter->mod_pos = *pos;

	return iter;
}

static void *t_mod_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	if (iter->func_pos > *pos)
		return NULL;

	iter->mod_pos = iter->func_pos;

	/* probes are only available if tr is set */
	if (!iter->tr)
		return NULL;

	for (l = 0; l <= (*pos - iter->func_pos); ) {
		p = t_mod_next(m, &l);
		if (!p)
			break;
	}
	if (!p) {
		iter->flags &= ~FTRACE_ITER_MOD;
		return t_probe_start(m, pos);
	}

	/* Only set this if we have an item */
	iter->flags |= FTRACE_ITER_MOD;

	return iter;
}

static int
t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
{
	struct ftrace_mod_load *ftrace_mod;
	struct trace_array *tr = iter->tr;

	if (WARN_ON_ONCE(!iter->mod_list) ||
	    iter->mod_list == &tr->mod_trace ||
	    iter->mod_list == &tr->mod_notrace)
		return -EIO;

	ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);

	if (ftrace_mod->func)
		seq_printf(m, "%s", ftrace_mod->func);
	else
		seq_putc(m, '*');

	seq_printf(m, ":mod:%s\n", ftrace_mod->module);

	return 0;
}

static void *
t_func_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
		     !ftrace_lookup_ip(iter->hash, rec->ip)) ||

		    ((iter->flags & FTRACE_ITER_ENABLED) &&
		     !(rec->flags & FTRACE_FL_ENABLED))) {

			rec = NULL;
			goto retry;
		}
	}

	if (!rec)
		return NULL;

	iter->pos = iter->func_pos = *pos;
	iter->func = rec;

	return iter;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	loff_t l = *pos; /* t_probe_start() must use original pos */
	void *ret;

	if (unlikely(ftrace_disabled))
		return NULL;

	if (iter->flags & FTRACE_ITER_PROBE)
		return t_probe_next(m, pos);

	if (iter->flags & FTRACE_ITER_MOD)
		return t_mod_next(m, pos);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		/* next must increment pos, and t_probe_start does not */
		(*pos)++;
		return t_mod_start(m, &l);
	}

	ret = t_func_next(m, pos);

	if (!ret)
		return t_mod_start(m, &l);

	return ret;
}

static void reset_iter_read(struct ftrace_iterator *iter)
{
	iter->pos = 0;
	iter->func_pos = 0;
	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		return NULL;

	/*
	 * If an lseek was done, then reset and start from beginning.
	 */
	if (*pos < iter->pos)
		reset_iter_read(iter);

	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
	    ftrace_hash_empty(iter->hash)) {
		iter->func_pos = 1; /* Account for the message */
		if (*pos > 0)
			return t_mod_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		/* reset in case of seek/pread */
		iter->flags &= ~FTRACE_ITER_PROBE;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_MOD)
		return t_mod_start(m, pos);

	/*
	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_lock. This is because
	 * those pointers can change without the lock.
	 */
	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_func_next(m, &l);
		if (!p)
			break;
	}

	if (!p)
		return t_mod_start(m, pos);

	return iter;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}

void * __weak
arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	return NULL;
}

static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
				struct dyn_ftrace *rec)
{
	void *ptr;

	ptr = arch_ftrace_trampoline_func(ops, rec);
	if (ptr)
		seq_printf(m, " ->%pS", ptr);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec;

	if (iter->flags & FTRACE_ITER_PROBE)
		return t_probe_show(m, iter);

	if (iter->flags & FTRACE_ITER_MOD)
		return t_mod_show(m, iter);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		if (iter->flags & FTRACE_ITER_NOTRACE)
			seq_puts(m, "#### no functions disabled ####\n");
		else
			seq_puts(m, "#### all functions enabled ####\n");
		return 0;
	}

	rec = iter->func;

	if (!rec)
		return 0;

	seq_printf(m, "%ps", (void *)rec->ip);
	if (iter->flags & FTRACE_ITER_ENABLED) {
		struct ftrace_ops *ops;

		seq_printf(m, " (%ld)%s%s",
			   ftrace_rec_count(rec),
			   rec->flags & FTRACE_FL_REGS ? " R" : "  ",
			   rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			ops = ftrace_find_tramp_ops_any(rec);
			if (ops) {
				do {
					seq_printf(m, "\ttramp: %pS (%pS)",
						   (void *)ops->trampoline,
						   (void *)ops->func);
					add_trampoline_func(m, ops, rec);
					ops = ftrace_find_tramp_ops_next(rec, ops);
				} while (ops);
			} else
				seq_puts(m, "\ttramp: ERROR!");
		} else {
			add_trampoline_func(m, NULL, rec);
		}
	}

	seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

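/*
 * For reference, the seq_file core drives the ops above roughly as
 * follows (a simplified sketch of what fs/seq_file.c does per read):
 *
 *	p = t_start(m, &pos);
 *	while (p) {
 *		t_show(m, p);
 *		p = t_next(m, p, &pos);
 *	}
 *	t_stop(m, p);
 *
 * so ftrace_lock is held from t_start() to t_stop() on each pass over
 * the records.
 */
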
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->ops = &global_ops;

	return 0;
}

static int
ftrace_enabled_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;

	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->flags = FTRACE_ITER_ENABLED;
	iter->ops = &global_ops;

	return 0;
}

/**
 * ftrace_regex_open - initialize function tracer filter files
 * @ops: The ftrace_ops that hold the hash filters
 * @flag: The type of filter to process
 * @inode: The inode, usually passed in to your open routine
 * @file: The file, usually passed in to your open routine
 *
 * ftrace_regex_open() initializes the filter files for the
 * @ops. Depending on @flag it may process the filter hash or
 * the notrace hash of @ops. With this called from the open
 * routine, you can use ftrace_filter_write() for the write
 * routine if @flag has FTRACE_ITER_FILTER set, or
 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
 * tracing_lseek() should be used as the lseek routine, and
 * release must call ftrace_regex_release().
 */
int
ftrace_regex_open(struct ftrace_ops *ops, int flag,
		  struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	struct ftrace_hash *hash;
	struct list_head *mod_head;
	struct trace_array *tr = ops->private;
	int ret = 0;

	ftrace_ops_init(ops);

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
		kfree(iter);
		return -ENOMEM;
	}

	iter->ops = ops;
	iter->flags = flag;
	iter->tr = tr;

	mutex_lock(&ops->func_hash->regex_lock);

	if (flag & FTRACE_ITER_NOTRACE) {
		hash = ops->func_hash->notrace_hash;
		mod_head = tr ? &tr->mod_notrace : NULL;
	} else {
		hash = ops->func_hash->filter_hash;
		mod_head = tr ? &tr->mod_trace : NULL;
	}

	iter->mod_list = mod_head;

	if (file->f_mode & FMODE_WRITE) {
		const int size_bits = FTRACE_HASH_DEFAULT_BITS;

		if (file->f_flags & O_TRUNC) {
			iter->hash = alloc_ftrace_hash(size_bits);
			clear_ftrace_mod_list(mod_head);
		} else {
			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
		}

		if (!iter->hash) {
			trace_parser_put(&iter->parser);
			kfree(iter);
			ret = -ENOMEM;
			goto out_unlock;
		}
	} else
		iter->hash = hash;

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else {
			/* Failed */
			free_ftrace_hash(iter->hash);
			trace_parser_put(&iter->parser);
			kfree(iter);
		}
	} else
		file->private_data = iter;

 out_unlock:
	mutex_unlock(&ops->func_hash->regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops,
			FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
			inode, file);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
				 inode, file);
}

/* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */
struct ftrace_glob {
	char *search;
	unsigned len;
	int type;
};

/*
 * If symbols in an architecture don't correspond exactly to the user-visible
 * name of what they represent, it is possible to define this function to
 * perform the necessary adjustments.
 */
char * __weak arch_ftrace_match_adjust(char *str, const char *search)
{
	return str;
}

static int ftrace_match(char *str, struct ftrace_glob *g)
{
	int matched = 0;
	int slen;

	str = arch_ftrace_match_adjust(str, g->search);

	switch (g->type) {
	case MATCH_FULL:
		if (strcmp(str, g->search) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, g->search, g->len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, g->search))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		slen = strlen(str);
		if (slen >= g->len &&
		    memcmp(str + slen - g->len, g->search, g->len) == 0)
			matched = 1;
		break;
	case MATCH_GLOB:
		if (glob_match(g->search, str))
			matched = 1;
		break;
	}

	return matched;
}

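/*
 * For reference, filter_parse_regex() maps user patterns to the match
 * types handled above roughly like this (illustrative examples):
 *
 *	"sched_switch"	-> MATCH_FULL		exact strcmp()
 *	"sched_*"	-> MATCH_FRONT_ONLY	prefix strncmp()
 *	"*_switch"	-> MATCH_END_ONLY	suffix memcmp()
 *	"*sched*"	-> MATCH_MIDDLE_ONLY	strstr()
 *	"s?hed_*ch"	-> MATCH_GLOB		full glob_match()
 */
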
static int
enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
{
	struct ftrace_func_entry *entry;
	int ret = 0;

	entry = ftrace_lookup_ip(hash, rec->ip);
	if (clear_filter) {
		/* Do nothing if it doesn't exist */
		if (!entry)
			return 0;

		free_hash_entry(hash, entry);
	} else {
		/* Do nothing if it exists */
		if (entry)
			return 0;

		ret = add_hash_entry(hash, rec->ip);
	}
	return ret;
}

static int
ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
		    struct ftrace_glob *mod_g, int exclude_mod)
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (mod_g) {
		int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;

		/* blank module name to match all modules */
		if (!mod_g->len) {
			/* blank module globbing: modname xor exclude_mod */
			if (!exclude_mod != !modname)
				goto func_match;
			return 0;
		}

		/*
		 * exclude_mod is set to trace everything but the given
		 * module. If it is set and the module matches, then
		 * return 0. If it is not set, and the module doesn't match
		 * also return 0. Otherwise, check the function to see if
		 * that matches.
		 */
		if (!mod_matches == !exclude_mod)
			return 0;
func_match:
		/* blank search means to match all funcs in the mod */
		if (!func_g->len)
			return 1;
	}

	return ftrace_match(str, func_g);
}

static int
match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct ftrace_glob func_g = { .type = MATCH_FULL };
	struct ftrace_glob mod_g = { .type = MATCH_FULL };
	struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
	int exclude_mod = 0;
	int found = 0;
	int ret;
	int clear_filter = 0;

	if (func) {
		func_g.type = filter_parse_regex(func, len, &func_g.search,
						 &clear_filter);
		func_g.len = strlen(func_g.search);
	}

	if (mod) {
		mod_g.type = filter_parse_regex(mod, strlen(mod),
						&mod_g.search, &exclude_mod);
		mod_g.len = strlen(mod_g.search);
	}

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out_unlock;

	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
			ret = enter_record(hash, rec, clear_filter);
			if (ret < 0) {
				found = ret;
				goto out_unlock;
			}
			found = 1;
		}
	} while_for_each_ftrace_rec();
 out_unlock:
	mutex_unlock(&ftrace_lock);

	return found;
}

static int
ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
{
	return match_records(hash, buff, len, NULL);
}

static void ftrace_ops_update_code(struct ftrace_ops *ops,
				   struct ftrace_ops_hash *old_hash)
{
	struct ftrace_ops *op;

	if (!ftrace_enabled)
		return;

	if (ops->flags & FTRACE_OPS_FL_ENABLED) {
		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
		return;
	}

	/*
	 * If this is the shared global_ops filter, then we need to
	 * check if there is another ops that shares it and is enabled.
	 * If so, we still need to run the modify code.
	 */
	if (ops->func_hash != &global_ops.local_hash)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->func_hash == &global_ops.local_hash &&
		    op->flags & FTRACE_OPS_FL_ENABLED) {
			ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
			/* Only need to do this once */
			return;
		}
	} while_for_each_ftrace_op(op);
}

static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
					   struct ftrace_hash **orig_hash,
					   struct ftrace_hash *hash,
					   int enable)
{
	struct ftrace_ops_hash old_hash_ops;
	struct ftrace_hash *old_hash;
	int ret;

	old_hash = *orig_hash;
	old_hash_ops.filter_hash = ops->func_hash->filter_hash;
	old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
	if (!ret) {
		ftrace_ops_update_code(ops, &old_hash_ops);
		free_ftrace_hash_rcu(old_hash);
	}
	return ret;
}

static bool module_exists(const char *module)
{
	/* All modules have the symbol __this_module */
	const char this_mod[] = "__this_module";
	char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
	unsigned long val;
	int n;

	n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);

	if (n > sizeof(modname) - 1)
		return false;

	val = module_kallsyms_lookup_name(modname);
	return val != 0;
}

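/*
 * E.g. module_exists("ext4") looks up "ext4:__this_module", a symbol
 * that appears in kallsyms only once ext4.ko has actually been loaded.
 */
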
3878static int cache_mod(struct trace_array *tr,
3879 const char *func, char *module, int enable)
3880{
3881 struct ftrace_mod_load *ftrace_mod, *n;
3882 struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
3883 int ret;
3884
3885 mutex_lock(&ftrace_lock);
3886
3887 /* We do not cache inverse filters */
3888 if (func[0] == '!') {
3889 func++;
3890 ret = -EINVAL;
3891
3892 /* Look to remove this hash */
3893 list_for_each_entry_safe(ftrace_mod, n, head, list) {
3894 if (strcmp(ftrace_mod->module, module) != 0)
3895 continue;
3896
3897 /* no func matches all */
Dan Carpenter44925df2017-07-12 10:33:40 +03003898 if (strcmp(func, "*") == 0 ||
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04003899 (ftrace_mod->func &&
3900 strcmp(ftrace_mod->func, func) == 0)) {
3901 ret = 0;
3902 free_ftrace_mod(ftrace_mod);
3903 continue;
3904 }
3905 }
3906 goto out;
3907 }
3908
3909 ret = -EINVAL;
3910 /* We only care about modules that have not been loaded yet */
3911 if (module_exists(module))
3912 goto out;
3913
3914 /* Save this string off, and execute it when the module is loaded */
3915 ret = ftrace_add_mod(tr, func, module, enable);
3916 out:
3917 mutex_unlock(&ftrace_lock);
3918
3919 return ret;
3920}
3921
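/*
 * Editor's note (usage, not in the original source): cache_mod() backs
 * the "mod" command for modules that are not loaded yet. For example,
 * assuming the usual tracefs layout:
 *
 *	echo 'btrfs_sync_file:mod:btrfs' > set_ftrace_filter
 *
 * is cached here while btrfs is not loaded, and is applied by
 * process_cached_mods() when the module appears. A leading '!'
 * removes a cached entry instead of adding one.
 */
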
Steven Rostedt (VMware)d7fbf8d2017-06-26 10:57:21 -04003922static int
3923ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3924 int reset, int enable);
3925
Arnd Bergmann69449bbd2017-07-10 10:44:03 +02003926#ifdef CONFIG_MODULES
Steven Rostedt (VMware)d7fbf8d2017-06-26 10:57:21 -04003927static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
3928 char *mod, bool enable)
3929{
3930 struct ftrace_mod_load *ftrace_mod, *n;
3931 struct ftrace_hash **orig_hash, *new_hash;
3932 LIST_HEAD(process_mods);
3933 char *func;
3934 int ret;
3935
3936 mutex_lock(&ops->func_hash->regex_lock);
3937
3938 if (enable)
3939 orig_hash = &ops->func_hash->filter_hash;
3940 else
3941 orig_hash = &ops->func_hash->notrace_hash;
3942
3943 new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
3944 *orig_hash);
3945 if (!new_hash)
Steven Rostedt (VMware)3b58a3c2017-06-28 09:09:38 -04003946 goto out; /* warn? */
Steven Rostedt (VMware)d7fbf8d2017-06-26 10:57:21 -04003947
3948 mutex_lock(&ftrace_lock);
3949
3950 list_for_each_entry_safe(ftrace_mod, n, head, list) {
3951
3952 if (strcmp(ftrace_mod->module, mod) != 0)
3953 continue;
3954
3955 if (ftrace_mod->func)
3956 func = kstrdup(ftrace_mod->func, GFP_KERNEL);
3957 else
3958 func = kstrdup("*", GFP_KERNEL);
3959
3960 if (!func) /* warn? */
3961 continue;
3962
3963 list_del(&ftrace_mod->list);
3964 list_add(&ftrace_mod->list, &process_mods);
3965
3966 /* Use the newly allocated func, as it may be "*" */
3967 kfree(ftrace_mod->func);
3968 ftrace_mod->func = func;
3969 }
3970
3971 mutex_unlock(&ftrace_lock);
3972
3973 list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
3974
3975 func = ftrace_mod->func;
3976
3977 /* Grabs ftrace_lock, which is why we have this extra step */
3978 match_records(new_hash, func, strlen(func), mod);
3979 free_ftrace_mod(ftrace_mod);
3980 }
3981
Steven Rostedt (VMware)8c08f0d2017-06-26 11:47:31 -04003982 if (enable && list_empty(head))
3983 new_hash->flags &= ~FTRACE_HASH_FL_MOD;
3984
Steven Rostedt (VMware)d7fbf8d2017-06-26 10:57:21 -04003985 mutex_lock(&ftrace_lock);
3986
3987 ret = ftrace_hash_move_and_update_ops(ops, orig_hash,
3988 new_hash, enable);
3989 mutex_unlock(&ftrace_lock);
3990
Steven Rostedt (VMware)3b58a3c2017-06-28 09:09:38 -04003991 out:
Steven Rostedt (VMware)d7fbf8d2017-06-26 10:57:21 -04003992 mutex_unlock(&ops->func_hash->regex_lock);
3993
3994 free_ftrace_hash(new_hash);
3995}
3996
3997static void process_cached_mods(const char *mod_name)
3998{
3999 struct trace_array *tr;
4000 char *mod;
4001
4002 mod = kstrdup(mod_name, GFP_KERNEL);
4003 if (!mod)
4004 return;
4005
4006 mutex_lock(&trace_types_lock);
4007 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4008 if (!list_empty(&tr->mod_trace))
4009 process_mod_list(&tr->mod_trace, tr->ops, mod, true);
4010 if (!list_empty(&tr->mod_notrace))
4011 process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4012 }
4013 mutex_unlock(&trace_types_lock);
4014
4015 kfree(mod);
4016}
Arnd Bergmann69449bbd2017-07-10 10:44:03 +02004017#endif
Steven Rostedt (VMware)d7fbf8d2017-06-26 10:57:21 -04004018
Steven Rostedtf6180772009-02-14 00:40:25 -05004019/*
4020 * We register the module command as a template to show others how
4021 * to register a command as well.
4022 */
4023
4024static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04004025ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04004026 char *func_orig, char *cmd, char *module, int enable)
Steven Rostedtf6180772009-02-14 00:40:25 -05004027{
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04004028 char *func;
Dmitry Safonov5e3949f2015-09-29 19:46:12 +03004029 int ret;
Steven Rostedtf6180772009-02-14 00:40:25 -05004030
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04004031 /* match_records() modifies func, and we need the original */
4032 func = kstrdup(func_orig, GFP_KERNEL);
4033 if (!func)
4034 return -ENOMEM;
4035
Steven Rostedtf6180772009-02-14 00:40:25 -05004036 /*
4037 * cmd == 'mod' because we only registered this func
4038 * for the 'mod' ftrace_func_command.
4039 * But if you register one func with multiple commands,
4040 * you can tell which command was used by the cmd
4041 * parameter.
4042 */
Dmitry Safonovf0a3b152015-09-29 19:46:13 +03004043 ret = match_records(hash, func, strlen(func), module);
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04004044 kfree(func);
4045
Steven Rostedtb448c4e2011-04-29 15:12:32 -04004046 if (!ret)
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04004047 return cache_mod(tr, func_orig, module, enable);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04004048 if (ret < 0)
4049 return ret;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04004050 return 0;
Steven Rostedtf6180772009-02-14 00:40:25 -05004051}
4052
4053static struct ftrace_func_command ftrace_mod_cmd = {
4054 .name = "mod",
4055 .func = ftrace_mod_callback,
4056};
4057
4058static int __init ftrace_mod_cmd_init(void)
4059{
4060 return register_ftrace_command(&ftrace_mod_cmd);
4061}
Steven Rostedt6f415672012-10-05 12:13:07 -04004062core_initcall(ftrace_mod_cmd_init);
Steven Rostedtf6180772009-02-14 00:40:25 -05004063
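/*
 * Editor's sketch (not in the original source): following the "mod"
 * template above, a hypothetical command "mycmd" would be wired up like
 * this. my_cmd_callback(), my_cmd and my_cmd_init() are made-up names;
 * the callback signature mirrors ftrace_mod_callback().
 */
static int
my_cmd_callback(struct trace_array *tr, struct ftrace_hash *hash,
		char *func, char *cmd, char *param, int enable)
{
	int ret;

	/* Invoked for "<func>:mycmd:<param>" written to the filter file */
	ret = match_records(hash, func, strlen(func), NULL);
	if (ret < 0)
		return ret;
	return ret ? 0 : -EINVAL;
}

static struct ftrace_func_command my_cmd = {
	.name		= "mycmd",
	.func		= my_cmd_callback,
};

static int __init my_cmd_init(void)
{
	/* Commands are currently only registered from __init */
	return register_ftrace_command(&my_cmd);
}
core_initcall(my_cmd_init);
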
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04004064static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
Steven Rostedta1e2e312011-08-09 12:50:46 -04004065 struct ftrace_ops *op, struct pt_regs *pt_regs)
Steven Rostedt59df055f2009-02-14 15:29:06 -05004066{
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004067 struct ftrace_probe_ops *probe_ops;
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004068 struct ftrace_func_probe *probe;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004069
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004070 probe = container_of(op, struct ftrace_func_probe, ops);
4071 probe_ops = probe->probe_ops;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004072
4073 /*
4074 * Disable preemption for these calls to prevent an RCU grace
4075 * period from elapsing. This syncs the hash iteration with the
4076 * freeing of items on the hash. rcu_read_lock is too dangerous here.
4077 */
Steven Rostedt5168ae52010-06-03 09:36:50 -04004078 preempt_disable_notrace();
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04004079 probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
Steven Rostedt5168ae52010-06-03 09:36:50 -04004080 preempt_enable_notrace();
Steven Rostedt59df055f2009-02-14 15:29:06 -05004081}
4082
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004083struct ftrace_func_map {
4084 struct ftrace_func_entry entry;
4085 void *data;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004086};
4087
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004088struct ftrace_func_mapper {
4089 struct ftrace_hash hash;
4090};
Steven Rostedt59df055f2009-02-14 15:29:06 -05004091
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004092/**
4093 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
4094 *
4095 * Returns a ftrace_func_mapper descriptor that can be used to map ips to data.
4096 */
4097struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
Steven Rostedt59df055f2009-02-14 15:29:06 -05004098{
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004099 struct ftrace_hash *hash;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004100
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004101 /*
4102 * The mapper is simply a ftrace_hash, but since the entries
4103 * in the hash are not ftrace_func_entry type, we define it
4104 * as a separate structure.
4105 */
4106 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4107 return (struct ftrace_func_mapper *)hash;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004108}
4109
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004110/**
4111 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
4112 * @mapper: The mapper that has the ip maps
4113 * @ip: the instruction pointer to find the data for
4114 *
4115 * Returns the data mapped to @ip if found, otherwise NULL. The return
4116 * is actually the address of the mapper data pointer. The address is
4117 * returned for use cases where the data is no bigger than a long, and
4118 * the user can use the data pointer as its data instead of having to
4119 * allocate more memory for the reference.
4120 */
4121void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
4122 unsigned long ip)
Steven Rostedt59df055f2009-02-14 15:29:06 -05004123{
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004124 struct ftrace_func_entry *entry;
4125 struct ftrace_func_map *map;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004126
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004127 entry = ftrace_lookup_ip(&mapper->hash, ip);
4128 if (!entry)
4129 return NULL;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004130
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004131 map = (struct ftrace_func_map *)entry;
4132 return &map->data;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004133}
4134
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004135/**
4136 * ftrace_func_mapper_add_ip - Map some data to an ip
4137 * @mapper: The mapper that has the ip maps
4138 * @ip: The instruction pointer address to map @data to
4139 * @data: The data to map to @ip
4140 *
4141 * Returns 0 on success, otherwise an error.
4142 */
4143int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
4144 unsigned long ip, void *data)
Steven Rostedt59df055f2009-02-14 15:29:06 -05004145{
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004146 struct ftrace_func_entry *entry;
4147 struct ftrace_func_map *map;
4148
4149 entry = ftrace_lookup_ip(&mapper->hash, ip);
4150 if (entry)
4151 return -EBUSY;
4152
4153 map = kmalloc(sizeof(*map), GFP_KERNEL);
4154 if (!map)
4155 return -ENOMEM;
4156
4157 map->entry.ip = ip;
4158 map->data = data;
4159
4160 __add_hash_entry(&mapper->hash, &map->entry);
4161
4162 return 0;
4163}
4164
4165/**
4166 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
4167 * @mapper: The mapper that has the ip maps
4168 * @ip: The instruction pointer address to remove the data from
4169 *
4170 * Returns the data if it is found, otherwise NULL.
4171 * Note, if the data pointer is used as the data itself (see
4172 * ftrace_func_mapper_find_ip()), then the return value may be
4173 * meaningless if the data pointer was set to zero.
4174 */
4175void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
4176 unsigned long ip)
4177{
4178 struct ftrace_func_entry *entry;
4179 struct ftrace_func_map *map;
4180 void *data;
4181
4182 entry = ftrace_lookup_ip(&mapper->hash, ip);
4183 if (!entry)
4184 return NULL;
4185
4186 map = (struct ftrace_func_map *)entry;
4187 data = map->data;
4188
4189 remove_hash_entry(&mapper->hash, entry);
Steven Rostedt59df055f2009-02-14 15:29:06 -05004190 kfree(entry);
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004191
4192 return data;
4193}
4194
4195/**
4196 * free_ftrace_func_mapper - free a mapping of ips and data
4197 * @mapper: The mapper that has the ip maps
4198 * @free_func: A function to be called on each data item.
4199 *
4200 * This is used to free the function mapper. The @free_func is optional
4201 * and can be used if the data needs to be freed as well.
4202 */
4203void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
4204 ftrace_mapper_func free_func)
4205{
4206 struct ftrace_func_entry *entry;
4207 struct ftrace_func_map *map;
4208 struct hlist_head *hhd;
4209 int size = 1 << mapper->hash.size_bits;
4210 int i;
4211
4212 if (free_func && mapper->hash.count) {
4213 for (i = 0; i < size; i++) {
4214 hhd = &mapper->hash.buckets[i];
4215 hlist_for_each_entry(entry, hhd, hlist) {
4216 map = (struct ftrace_func_map *)entry;
4217 free_func(map);
4218 }
4219 }
4220 }
4221 free_ftrace_hash(&mapper->hash);
4222}
4223
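/*
 * Editor's sketch (not in the original source): typical use of the
 * mapper API above. mapper_example() is a made-up caller; the ip would
 * normally come from a probe callback.
 */
static void mapper_example(unsigned long ip)
{
	struct ftrace_func_mapper *mapper;
	long *count;

	mapper = allocate_ftrace_func_mapper();
	if (!mapper)
		return;

	/* Map @ip to an initial value; this fails with -EBUSY if @ip exists */
	if (ftrace_func_mapper_add_ip(mapper, ip, (void *)0) < 0)
		goto out;

	/*
	 * find_ip returns the address of the stored data pointer, so a
	 * value no bigger than a long can live in the slot itself:
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	if (count)
		(*count)++;

	/* remove_ip unmaps @ip and returns the stored data */
	ftrace_func_mapper_remove_ip(mapper, ip);
out:
	/* With a NULL free_func, entries are freed without a callback */
	free_ftrace_func_mapper(mapper, NULL);
}
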
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004224static void release_probe(struct ftrace_func_probe *probe)
4225{
4226 struct ftrace_probe_ops *probe_ops;
4227
4228 mutex_lock(&ftrace_lock);
4229
4230 WARN_ON(probe->ref <= 0);
4231
4232 /* Subtract the ref that was used to protect this instance */
4233 probe->ref--;
4234
4235 if (!probe->ref) {
4236 probe_ops = probe->probe_ops;
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04004237 /*
4238 * Sending zero as ip tells probe_ops to free
4239 * the probe->data itself
4240 */
4241 if (probe_ops->free)
4242 probe_ops->free(probe_ops, probe->tr, 0, probe->data);
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004243 list_del(&probe->list);
4244 kfree(probe);
4245 }
4246 mutex_unlock(&ftrace_lock);
4247}
4248
4249static void acquire_probe_locked(struct ftrace_func_probe *probe)
4250{
4251 /*
4252 * Add one ref to keep it from being freed when releasing the
4253 * ftrace_lock mutex.
4254 */
4255 probe->ref++;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004256}
4257
Steven Rostedt59df055f2009-02-14 15:29:06 -05004258int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04004259register_ftrace_function_probe(char *glob, struct trace_array *tr,
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004260 struct ftrace_probe_ops *probe_ops,
4261 void *data)
Steven Rostedt59df055f2009-02-14 15:29:06 -05004262{
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004263 struct ftrace_func_entry *entry;
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004264 struct ftrace_func_probe *probe;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004265 struct ftrace_hash **orig_hash;
4266 struct ftrace_hash *old_hash;
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004267 struct ftrace_hash *hash;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004268 int count = 0;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004269 int size;
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004270 int ret;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004271 int i;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004272
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04004273 if (WARN_ON(!tr))
Steven Rostedt59df055f2009-02-14 15:29:06 -05004274 return -EINVAL;
4275
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004276 /* We do not support '!' for function probes */
4277 if (WARN_ON(glob[0] == '!'))
Steven Rostedt59df055f2009-02-14 15:29:06 -05004278 return -EINVAL;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004279
Steven Rostedt (Red Hat)7485058e2015-01-13 14:03:38 -05004280
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004281 mutex_lock(&ftrace_lock);
4282 /* Check if the probe_ops is already registered */
4283 list_for_each_entry(probe, &tr->func_probes, list) {
4284 if (probe->probe_ops == probe_ops)
4285 break;
4286 }
4287 if (&probe->list == &tr->func_probes) {
4288 probe = kzalloc(sizeof(*probe), GFP_KERNEL);
4289 if (!probe) {
4290 mutex_unlock(&ftrace_lock);
4291 return -ENOMEM;
4292 }
4293 probe->probe_ops = probe_ops;
4294 probe->ops.func = function_trace_probe_call;
4295 probe->tr = tr;
4296 ftrace_ops_init(&probe->ops);
4297 list_add(&probe->list, &tr->func_probes);
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004298 }
4299
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004300 acquire_probe_locked(probe);
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004301
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004302 mutex_unlock(&ftrace_lock);
4303
4304 mutex_lock(&probe->ops.func_hash->regex_lock);
4305
4306 orig_hash = &probe->ops.func_hash->filter_hash;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004307 old_hash = *orig_hash;
4308 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4309
4310 ret = ftrace_match_records(hash, glob, strlen(glob));
4311
4312 /* Nothing found? */
4313 if (!ret)
4314 ret = -EINVAL;
4315
4316 if (ret < 0)
Steven Rostedt (Red Hat)5ae0bf52013-05-09 18:20:37 -04004317 goto out;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004318
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004319 size = 1 << hash->size_bits;
4320 for (i = 0; i < size; i++) {
4321 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4322 if (ftrace_lookup_ip(old_hash, entry->ip))
4323 continue;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004324 /*
4325 * The caller might want to do something special
4326 * for each function we find. We call the callback
4327 * to give the caller an opportunity to do so.
4328 */
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004329 if (probe_ops->init) {
4330 ret = probe_ops->init(probe_ops, tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04004331 entry->ip, data,
4332 &probe->data);
4333 if (ret < 0) {
4334 if (probe_ops->free && count)
4335 probe_ops->free(probe_ops, tr,
4336 0, probe->data);
4337 probe->data = NULL;
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004338 goto out;
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04004339 }
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004340 }
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004341 count++;
4342 }
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004343 }
Steven Rostedt45a4a232011-04-21 23:16:46 -04004344
Steven Rostedt (Red Hat)5ae0bf52013-05-09 18:20:37 -04004345 mutex_lock(&ftrace_lock);
4346
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004347 if (!count) {
4348 /* Nothing was added? */
4349 ret = -EINVAL;
4350 goto out_unlock;
4351 }
Steven Rostedt59df055f2009-02-14 15:29:06 -05004352
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004353 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4354 hash, 1);
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004355 if (ret < 0)
Steven Rostedt (VMware)8d707252017-04-05 13:36:18 -04004356 goto err_unlock;
Steven Rostedt (Red Hat)546fece2016-11-14 16:31:49 -05004357
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004358 /* One ref for each new function traced */
4359 probe->ref += count;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004360
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004361 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
4362 ret = ftrace_startup(&probe->ops, 0);
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004363
Steven Rostedt59df055f2009-02-14 15:29:06 -05004364 out_unlock:
Steven Rostedt (Red Hat)5ae0bf52013-05-09 18:20:37 -04004365 mutex_unlock(&ftrace_lock);
Steven Rostedt59df055f2009-02-14 15:29:06 -05004366
4367 if (!ret)
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004368 ret = count;
Steven Rostedt (Red Hat)5ae0bf52013-05-09 18:20:37 -04004369 out:
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004370 mutex_unlock(&probe->ops.func_hash->regex_lock);
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004371 free_ftrace_hash(hash);
Steven Rostedt59df055f2009-02-14 15:29:06 -05004372
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004373 release_probe(probe);
4374
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004375 return ret;
Steven Rostedt (VMware)8d707252017-04-05 13:36:18 -04004376
4377 err_unlock:
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004378 if (!probe_ops->free || !count)
Steven Rostedt (VMware)8d707252017-04-05 13:36:18 -04004379 goto out_unlock;
4380
4381 /* Failed to do the move, need to call the free functions */
4382 for (i = 0; i < size; i++) {
4383 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4384 if (ftrace_lookup_ip(old_hash, entry->ip))
4385 continue;
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04004386 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
Steven Rostedt (VMware)8d707252017-04-05 13:36:18 -04004387 }
4388 }
4389 goto out_unlock;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004390}
4391
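/*
 * Editor's sketch (not in the original source): the minimal shape of a
 * ftrace_probe_ops that register_ftrace_function_probe() consumes. The
 * ->func() signature mirrors the call in function_trace_probe_call();
 * my_probe_func(), my_probe_ops and the "vfs_*" glob are made up.
 */
static void
my_probe_func(unsigned long ip, unsigned long parent_ip,
	      struct trace_array *tr, struct ftrace_probe_ops *ops,
	      void *data)
{
	/* Runs from the function tracer with preemption disabled */
}

static struct ftrace_probe_ops my_probe_ops = {
	.func	= my_probe_func,
	/* ->init() and ->free() are optional; see release_probe() above */
};

/*
 * register_ftrace_function_probe("vfs_*", tr, &my_probe_ops, NULL)
 * would then hook every matching function and return the number of
 * functions hooked, or a negative error.
 */
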
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04004392int
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004393unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
4394 struct ftrace_probe_ops *probe_ops)
Steven Rostedt59df055f2009-02-14 15:29:06 -05004395{
Steven Rostedt (VMware)82cc4fc2017-04-14 17:45:45 -04004396 struct ftrace_ops_hash old_hash_ops;
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004397 struct ftrace_func_entry *entry;
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004398 struct ftrace_func_probe *probe;
Dmitry Safonov3ba00922015-09-29 19:46:14 +03004399 struct ftrace_glob func_g;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004400 struct ftrace_hash **orig_hash;
4401 struct ftrace_hash *old_hash;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004402 struct ftrace_hash *hash = NULL;
Sasha Levinb67bfe02013-02-27 17:06:00 -08004403 struct hlist_node *tmp;
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004404 struct hlist_head hhd;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004405 char str[KSYM_SYMBOL_LEN];
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004406 int count = 0;
4407 int i, ret = -ENODEV;
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004408 int size;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004409
Naveen N. Raocbab5672017-05-16 23:21:25 +05304410 if (!glob || !strlen(glob) || !strcmp(glob, "*"))
Dmitry Safonov3ba00922015-09-29 19:46:14 +03004411 func_g.search = NULL;
Naveen N. Raocbab5672017-05-16 23:21:25 +05304412 else {
Steven Rostedt59df055f2009-02-14 15:29:06 -05004413 int not;
4414
Dmitry Safonov3ba00922015-09-29 19:46:14 +03004415 func_g.type = filter_parse_regex(glob, strlen(glob),
4416 &func_g.search, &not);
4417 func_g.len = strlen(func_g.search);
Steven Rostedt59df055f2009-02-14 15:29:06 -05004418
Steven Rostedtb6887d72009-02-17 12:32:04 -05004419 /* we do not support '!' for function probes */
Steven Rostedt59df055f2009-02-14 15:29:06 -05004420 if (WARN_ON(not))
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04004421 return -EINVAL;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004422 }
4423
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004424 mutex_lock(&ftrace_lock);
4425 /* Check if the probe_ops is already registered */
4426 list_for_each_entry(probe, &tr->func_probes, list) {
4427 if (probe->probe_ops == probe_ops)
4428 break;
4429 }
4430 if (&probe->list == &tr->func_probes)
4431 goto err_unlock_ftrace;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004432
4433 ret = -EINVAL;
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004434 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
4435 goto err_unlock_ftrace;
4436
4437 acquire_probe_locked(probe);
4438
4439 mutex_unlock(&ftrace_lock);
4440
4441 mutex_lock(&probe->ops.func_hash->regex_lock);
4442
4443 orig_hash = &probe->ops.func_hash->filter_hash;
4444 old_hash = *orig_hash;
4445
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004446 if (ftrace_hash_empty(old_hash))
4447 goto out_unlock;
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004448
Steven Rostedt (VMware)82cc4fc2017-04-14 17:45:45 -04004449 old_hash_ops.filter_hash = old_hash;
4450 /* Probes only have filters */
4451 old_hash_ops.notrace_hash = NULL;
4452
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04004453 ret = -ENOMEM;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004454 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004455 if (!hash)
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004456 goto out_unlock;
4457
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004458 INIT_HLIST_HEAD(&hhd);
Steven Rostedt (Red Hat)7818b382013-03-13 12:42:58 -04004459
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004460 size = 1 << hash->size_bits;
4461 for (i = 0; i < size; i++) {
4462 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
Steven Rostedt59df055f2009-02-14 15:29:06 -05004463
Dmitry Safonov3ba00922015-09-29 19:46:14 +03004464 if (func_g.search) {
Steven Rostedt59df055f2009-02-14 15:29:06 -05004465 kallsyms_lookup(entry->ip, NULL, NULL,
4466 NULL, str);
Dmitry Safonov3ba00922015-09-29 19:46:14 +03004467 if (!ftrace_match(str, &func_g))
Steven Rostedt59df055f2009-02-14 15:29:06 -05004468 continue;
4469 }
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004470 count++;
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004471 remove_hash_entry(hash, entry);
4472 hlist_add_head(&entry->hlist, &hhd);
Steven Rostedt59df055f2009-02-14 15:29:06 -05004473 }
4474 }
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04004475
4476 /* Nothing found? */
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004477 if (!count) {
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04004478 ret = -EINVAL;
4479 goto out_unlock;
4480 }
4481
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004482 mutex_lock(&ftrace_lock);
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004483
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004484 WARN_ON(probe->ref < count);
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004485
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004486 probe->ref -= count;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004487
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004488 if (ftrace_hash_empty(hash))
4489 ftrace_shutdown(&probe->ops, 0);
4490
4491 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004492 hash, 1);
Steven Rostedt (VMware)82cc4fc2017-04-14 17:45:45 -04004493
4494 /* still need to update the function call sites */
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004495 if (ftrace_enabled && !ftrace_hash_empty(hash))
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004496 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
Steven Rostedt (VMware)82cc4fc2017-04-14 17:45:45 -04004497 &old_hash_ops);
Steven Rostedt (Red Hat)7818b382013-03-13 12:42:58 -04004498 synchronize_sched();
Steven Rostedt (Red Hat)3296fc42014-07-24 15:33:41 -04004499
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004500 hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
4501 hlist_del(&entry->hlist);
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004502 if (probe_ops->free)
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04004503 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004504 kfree(entry);
Steven Rostedt (Red Hat)7818b382013-03-13 12:42:58 -04004505 }
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004506 mutex_unlock(&ftrace_lock);
Dmitry Safonov3ba00922015-09-29 19:46:14 +03004507
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004508 out_unlock:
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004509 mutex_unlock(&probe->ops.func_hash->regex_lock);
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004510 free_ftrace_hash(hash);
Steven Rostedt59df055f2009-02-14 15:29:06 -05004511
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004512 release_probe(probe);
Steven Rostedt59df055f2009-02-14 15:29:06 -05004513
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004514 return ret;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004515
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004516 err_unlock_ftrace:
4517 mutex_unlock(&ftrace_lock);
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04004518 return ret;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004519}
4520
Naveen N. Raoa0e63692017-05-16 23:21:26 +05304521void clear_ftrace_function_probes(struct trace_array *tr)
4522{
4523 struct ftrace_func_probe *probe, *n;
4524
4525 list_for_each_entry_safe(probe, n, &tr->func_probes, list)
4526 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
4527}
4528
Steven Rostedtf6180772009-02-14 00:40:25 -05004529static LIST_HEAD(ftrace_commands);
4530static DEFINE_MUTEX(ftrace_cmd_mutex);
4531
Tom Zanussi38de93a2013-10-24 08:34:18 -05004532/*
4533 * Currently we only register ftrace commands from __init, so mark this
4534 * __init too.
4535 */
4536__init int register_ftrace_command(struct ftrace_func_command *cmd)
Steven Rostedtf6180772009-02-14 00:40:25 -05004537{
4538 struct ftrace_func_command *p;
4539 int ret = 0;
4540
4541 mutex_lock(&ftrace_cmd_mutex);
4542 list_for_each_entry(p, &ftrace_commands, list) {
4543 if (strcmp(cmd->name, p->name) == 0) {
4544 ret = -EBUSY;
4545 goto out_unlock;
4546 }
4547 }
4548 list_add(&cmd->list, &ftrace_commands);
4549 out_unlock:
4550 mutex_unlock(&ftrace_cmd_mutex);
4551
4552 return ret;
4553}
4554
Tom Zanussi38de93a2013-10-24 08:34:18 -05004555/*
4556 * Currently we only unregister ftrace commands from __init, so mark
4557 * this __init too.
4558 */
4559__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
Steven Rostedtf6180772009-02-14 00:40:25 -05004560{
4561 struct ftrace_func_command *p, *n;
4562 int ret = -ENODEV;
4563
4564 mutex_lock(&ftrace_cmd_mutex);
4565 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
4566 if (strcmp(cmd->name, p->name) == 0) {
4567 ret = 0;
4568 list_del_init(&p->list);
4569 goto out_unlock;
4570 }
4571 }
4572 out_unlock:
4573 mutex_unlock(&ftrace_cmd_mutex);
4574
4575 return ret;
4576}
4577
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04004578static int ftrace_process_regex(struct ftrace_iterator *iter,
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004579 char *buff, int len, int enable)
Steven Rostedt64e7c442009-02-13 17:08:48 -05004580{
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04004581 struct ftrace_hash *hash = iter->hash;
Steven Rostedt (VMware)d2afd57a2017-04-20 11:31:35 -04004582 struct trace_array *tr = iter->ops->private;
Steven Rostedtf6180772009-02-14 00:40:25 -05004583 char *func, *command, *next = buff;
Steven Rostedt6a24a242009-02-17 11:20:26 -05004584 struct ftrace_func_command *p;
GuoWen Li0aff1c02011-06-01 19:18:47 +08004585 int ret = -EINVAL;
Steven Rostedt64e7c442009-02-13 17:08:48 -05004586
4587 func = strsep(&next, ":");
4588
4589 if (!next) {
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04004590 ret = ftrace_match_records(hash, func, len);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04004591 if (!ret)
4592 ret = -EINVAL;
4593 if (ret < 0)
4594 return ret;
4595 return 0;
Steven Rostedt64e7c442009-02-13 17:08:48 -05004596 }
4597
Steven Rostedtf6180772009-02-14 00:40:25 -05004598 /* command found */
Steven Rostedt64e7c442009-02-13 17:08:48 -05004599
4600 command = strsep(&next, ":");
4601
Steven Rostedtf6180772009-02-14 00:40:25 -05004602 mutex_lock(&ftrace_cmd_mutex);
4603 list_for_each_entry(p, &ftrace_commands, list) {
4604 if (strcmp(p->name, command) == 0) {
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04004605 ret = p->func(tr, hash, func, command, next, enable);
Steven Rostedtf6180772009-02-14 00:40:25 -05004606 goto out_unlock;
4607 }
Steven Rostedt64e7c442009-02-13 17:08:48 -05004608 }
Steven Rostedtf6180772009-02-14 00:40:25 -05004609 out_unlock:
4610 mutex_unlock(&ftrace_cmd_mutex);
Steven Rostedt64e7c442009-02-13 17:08:48 -05004611
Steven Rostedtf6180772009-02-14 00:40:25 -05004612 return ret;
Steven Rostedt64e7c442009-02-13 17:08:48 -05004613}
4614
Ingo Molnare309b412008-05-12 21:20:51 +02004615static ssize_t
Steven Rostedt41c52c02008-05-22 11:46:33 -04004616ftrace_regex_write(struct file *file, const char __user *ubuf,
4617 size_t cnt, loff_t *ppos, int enable)
Steven Rostedt5072c592008-05-12 21:20:43 +02004618{
4619 struct ftrace_iterator *iter;
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004620 struct trace_parser *parser;
4621 ssize_t ret, read;
Steven Rostedt5072c592008-05-12 21:20:43 +02004622
Li Zefan4ba79782009-09-22 13:52:20 +08004623 if (!cnt)
Steven Rostedt5072c592008-05-12 21:20:43 +02004624 return 0;
4625
Steven Rostedt5072c592008-05-12 21:20:43 +02004626 if (file->f_mode & FMODE_READ) {
4627 struct seq_file *m = file->private_data;
4628 iter = m->private;
4629 } else
4630 iter = file->private_data;
4631
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09004632 if (unlikely(ftrace_disabled))
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004633 return -ENODEV;
4634
4635 /* iter->hash is a local copy, so we don't need regex_lock */
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09004636
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004637 parser = &iter->parser;
4638 read = trace_get_user(parser, ubuf, cnt, ppos);
Steven Rostedt5072c592008-05-12 21:20:43 +02004639
Li Zefan4ba79782009-09-22 13:52:20 +08004640 if (read >= 0 && trace_parser_loaded(parser) &&
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004641 !trace_parser_cont(parser)) {
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04004642 ret = ftrace_process_regex(iter, parser->buffer,
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004643 parser->idx, enable);
Li Zefan313254a2009-12-08 11:15:30 +08004644 trace_parser_clear(parser);
Steven Rostedt (Red Hat)7c088b52013-05-09 11:35:12 -04004645 if (ret < 0)
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004646 goto out;
Steven Rostedt5072c592008-05-12 21:20:43 +02004647 }
4648
Steven Rostedt5072c592008-05-12 21:20:43 +02004649 ret = read;
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004650 out:
Steven Rostedt5072c592008-05-12 21:20:43 +02004651 return ret;
4652}
4653
Steven Rostedtfc13cb02011-12-19 14:41:25 -05004654ssize_t
Steven Rostedt41c52c02008-05-22 11:46:33 -04004655ftrace_filter_write(struct file *file, const char __user *ubuf,
4656 size_t cnt, loff_t *ppos)
4657{
4658 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
4659}
4660
Steven Rostedtfc13cb02011-12-19 14:41:25 -05004661ssize_t
Steven Rostedt41c52c02008-05-22 11:46:33 -04004662ftrace_notrace_write(struct file *file, const char __user *ubuf,
4663 size_t cnt, loff_t *ppos)
4664{
4665 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
4666}
4667
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004668static int
Masami Hiramatsu647664e2012-06-05 19:28:08 +09004669ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
4670{
4671 struct ftrace_func_entry *entry;
4672
4673 if (!ftrace_location(ip))
4674 return -EINVAL;
4675
4676 if (remove) {
4677 entry = ftrace_lookup_ip(hash, ip);
4678 if (!entry)
4679 return -ENOENT;
4680 free_hash_entry(hash, entry);
4681 return 0;
4682 }
4683
4684 return add_hash_entry(hash, ip);
4685}
4686
4687static int
4688ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
4689 unsigned long ip, int remove, int reset, int enable)
Steven Rostedt41c52c02008-05-22 11:46:33 -04004690{
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004691 struct ftrace_hash **orig_hash;
Steven Rostedtf45948e2011-05-02 12:29:25 -04004692 struct ftrace_hash *hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004693 int ret;
Steven Rostedtf45948e2011-05-02 12:29:25 -04004694
Steven Rostedt41c52c02008-05-22 11:46:33 -04004695 if (unlikely(ftrace_disabled))
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004696 return -ENODEV;
Steven Rostedt41c52c02008-05-22 11:46:33 -04004697
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04004698 mutex_lock(&ops->func_hash->regex_lock);
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004699
Steven Rostedtf45948e2011-05-02 12:29:25 -04004700 if (enable)
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04004701 orig_hash = &ops->func_hash->filter_hash;
Steven Rostedtf45948e2011-05-02 12:29:25 -04004702 else
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04004703 orig_hash = &ops->func_hash->notrace_hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004704
Wang Nanb972cc52014-07-15 08:40:20 +08004705 if (reset)
4706 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4707 else
4708 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
4709
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004710 if (!hash) {
4711 ret = -ENOMEM;
4712 goto out_regex_unlock;
4713 }
Steven Rostedtf45948e2011-05-02 12:29:25 -04004714
Jiri Olsaac483c42012-01-02 10:04:14 +01004715 if (buf && !ftrace_match_records(hash, buf, len)) {
4716 ret = -EINVAL;
4717 goto out_regex_unlock;
4718 }
Masami Hiramatsu647664e2012-06-05 19:28:08 +09004719 if (ip) {
4720 ret = ftrace_match_addr(hash, ip, remove);
4721 if (ret < 0)
4722 goto out_regex_unlock;
4723 }
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004724
4725 mutex_lock(&ftrace_lock);
Steven Rostedt (VMware)e16b35d2017-04-04 14:46:56 -04004726 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004727 mutex_unlock(&ftrace_lock);
4728
Jiri Olsaac483c42012-01-02 10:04:14 +01004729 out_regex_unlock:
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04004730 mutex_unlock(&ops->func_hash->regex_lock);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004731
4732 free_ftrace_hash(hash);
4733 return ret;
Steven Rostedt41c52c02008-05-22 11:46:33 -04004734}
4735
Masami Hiramatsu647664e2012-06-05 19:28:08 +09004736static int
4737ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
4738 int reset, int enable)
4739{
4740 return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
4741}
4742
4743/**
4744 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
4745 * @ops - the ops to set the filter with
4746 * @ip - the address to add to or remove from the filter.
4747 * @remove - non zero to remove the ip from the filter
4748 * @reset - non zero to reset all filters before applying this filter.
4749 *
4750 * Filters denote which functions should be enabled when tracing is enabled.
4751 * If @ip is NULL, it fails to update the filter.
4752 */
4753int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
4754 int remove, int reset)
4755{
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09004756 ftrace_ops_init(ops);
Masami Hiramatsu647664e2012-06-05 19:28:08 +09004757 return ftrace_set_addr(ops, ip, remove, reset, 1);
4758}
4759EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
4760
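/*
 * Editor's sketch (not in the original source; my_ops is hypothetical):
 * filtering a single function by address, the way a kprobe-style user
 * would:
 *
 *	unsigned long ip = kallsyms_lookup_name("schedule");
 *
 *	ftrace_set_filter_ip(&my_ops, ip, 0, 1);	reset, then add @ip
 *	ftrace_set_filter_ip(&my_ops, ip, 1, 0);	remove @ip again
 *
 * An @ip that is not an ftrace location is rejected with -EINVAL
 * (see ftrace_match_addr() above).
 */
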
Joel Fernandesd032ae82016-11-15 12:31:20 -08004761/**
4762 * ftrace_ops_set_global_filter - setup ops to use global filters
4763 * @ops - the ops which will use the global filters
4764 *
4765 * ftrace users who need global function trace filtering should call this.
4766 * It can set the global filter only if ops were not initialized before.
4767 */
4768void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
4769{
4770 if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
4771 return;
4772
4773 ftrace_ops_init(ops);
4774 ops->func_hash = &global_ops.local_hash;
4775}
4776EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
4777
Masami Hiramatsu647664e2012-06-05 19:28:08 +09004778static int
4779ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4780 int reset, int enable)
4781{
4782 return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
4783}
4784
Steven Rostedt77a2b372008-05-12 21:20:45 +02004785/**
4786 * ftrace_set_filter - set a function to filter on in ftrace
Steven Rostedt936e0742011-05-05 22:54:01 -04004787 * @ops - the ops to set the filter with
Steven Rostedt77a2b372008-05-12 21:20:45 +02004788 * @buf - the string that holds the function filter text.
4789 * @len - the length of the string.
4790 * @reset - non zero to reset all filters before applying this filter.
4791 *
4792 * Filters denote which functions should be enabled when tracing is enabled.
4793 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4794 */
Jiri Olsaac483c42012-01-02 10:04:14 +01004795int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
Steven Rostedt936e0742011-05-05 22:54:01 -04004796 int len, int reset)
Steven Rostedt77a2b372008-05-12 21:20:45 +02004797{
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09004798 ftrace_ops_init(ops);
Jiri Olsaac483c42012-01-02 10:04:14 +01004799 return ftrace_set_regex(ops, buf, len, reset, 1);
Steven Rostedt41c52c02008-05-22 11:46:33 -04004800}
Steven Rostedt936e0742011-05-05 22:54:01 -04004801EXPORT_SYMBOL_GPL(ftrace_set_filter);
Steven Rostedt4eebcc82008-05-12 21:20:48 +02004802
Steven Rostedt41c52c02008-05-22 11:46:33 -04004803/**
4804 * ftrace_set_notrace - set a function to not trace in ftrace
Steven Rostedt936e0742011-05-05 22:54:01 -04004805 * @ops - the ops to set the notrace filter with
Steven Rostedt41c52c02008-05-22 11:46:33 -04004806 * @buf - the string that holds the function notrace text.
4807 * @len - the length of the string.
4808 * @reset - non zero to reset all filters before applying this filter.
4809 *
4810 * Notrace Filters denote which functions should not be enabled when tracing
4811 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4812 * for tracing.
4813 */
Jiri Olsaac483c42012-01-02 10:04:14 +01004814int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
Steven Rostedt936e0742011-05-05 22:54:01 -04004815 int len, int reset)
4816{
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09004817 ftrace_ops_init(ops);
Jiri Olsaac483c42012-01-02 10:04:14 +01004818 return ftrace_set_regex(ops, buf, len, reset, 0);
Steven Rostedt936e0742011-05-05 22:54:01 -04004819}
4820EXPORT_SYMBOL_GPL(ftrace_set_notrace);
4821/**
Jiaxing Wang8d1b0652014-04-20 23:10:44 +08004822 * ftrace_set_global_filter - set a function to filter on with global tracers
Steven Rostedt936e0742011-05-05 22:54:01 -04004823 * @buf - the string that holds the function filter text.
4824 * @len - the length of the string.
4825 * @reset - non zero to reset all filters before applying this filter.
4826 *
4827 * Filters denote which functions should be enabled when tracing is enabled.
4828 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4829 */
4830void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
4831{
4832 ftrace_set_regex(&global_ops, buf, len, reset, 1);
4833}
4834EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
4835
4836/**
Jiaxing Wang8d1b0652014-04-20 23:10:44 +08004837 * ftrace_set_global_notrace - set a function to not trace with global tracers
Steven Rostedt936e0742011-05-05 22:54:01 -04004838 * @buf - the string that holds the function notrace text.
4839 * @len - the length of the string.
4840 * @reset - non zero to reset all filters before applying this filter.
4841 *
4842 * Notrace Filters denote which functions should not be enabled when tracing
4843 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4844 * for tracing.
4845 */
4846void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
Steven Rostedt41c52c02008-05-22 11:46:33 -04004847{
Steven Rostedtf45948e2011-05-02 12:29:25 -04004848 ftrace_set_regex(&global_ops, buf, len, reset, 0);
Steven Rostedt77a2b372008-05-12 21:20:45 +02004849}
Steven Rostedt936e0742011-05-05 22:54:01 -04004850EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
Steven Rostedt77a2b372008-05-12 21:20:45 +02004851
Steven Rostedt2af15d62009-05-28 13:37:24 -04004852/*
4853 * command line interface to allow users to set filters on boot up.
4854 */
4855#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
4856static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4857static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
4858
Steven Rostedt (Red Hat)f1ed7c72013-06-27 22:18:06 -04004859/* Used by the function selftest to skip testing when a boot-time filter is set */
4860bool ftrace_filter_param __initdata;
4861
Steven Rostedt2af15d62009-05-28 13:37:24 -04004862static int __init set_ftrace_notrace(char *str)
4863{
Steven Rostedt (Red Hat)f1ed7c72013-06-27 22:18:06 -04004864 ftrace_filter_param = true;
Chen Gang75761cc2013-04-08 12:12:39 +08004865 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
Steven Rostedt2af15d62009-05-28 13:37:24 -04004866 return 1;
4867}
4868__setup("ftrace_notrace=", set_ftrace_notrace);
4869
4870static int __init set_ftrace_filter(char *str)
4871{
Steven Rostedt (Red Hat)f1ed7c72013-06-27 22:18:06 -04004872 ftrace_filter_param = true;
Chen Gang75761cc2013-04-08 12:12:39 +08004873 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
Steven Rostedt2af15d62009-05-28 13:37:24 -04004874 return 1;
4875}
4876__setup("ftrace_filter=", set_ftrace_filter);
4877
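/*
 * Editor's note (usage, not in the original source): the boot parameters
 * take a comma-separated list of functions or globs, e.g.:
 *
 *	ftrace_filter=vfs_read,vfs_write
 *	ftrace_notrace=*spin_lock*
 *
 * Each entry is applied through ftrace_set_regex() once ftrace is
 * initialized (see set_ftrace_early_filters() below).
 */
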
Stefan Assmann369bc182009-10-12 22:17:21 +02004878#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Lai Jiangshanf6060f42009-11-05 11:16:17 +08004879static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
Namhyung Kim0d7d9a12014-06-13 01:23:50 +09004880static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09004881static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
Steven Rostedt801c29f2010-03-05 20:02:19 -05004882
Stefan Assmann369bc182009-10-12 22:17:21 +02004883static int __init set_graph_function(char *str)
4884{
Frederic Weisbecker06f43d62009-10-14 20:43:39 +02004885 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
Stefan Assmann369bc182009-10-12 22:17:21 +02004886 return 1;
4887}
4888__setup("ftrace_graph_filter=", set_graph_function);
4889
Namhyung Kim0d7d9a12014-06-13 01:23:50 +09004890static int __init set_graph_notrace_function(char *str)
4891{
4892 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
4893 return 1;
4894}
4895__setup("ftrace_graph_notrace=", set_graph_notrace_function);
4896
Todd Brandt65a50c652017-03-02 16:12:15 -08004897static int __init set_graph_max_depth_function(char *str)
4898{
4899 if (!str)
4900 return 0;
4901 fgraph_max_depth = simple_strtoul(str, NULL, 0);
4902 return 1;
4903}
4904__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
4905
Namhyung Kim0d7d9a12014-06-13 01:23:50 +09004906static void __init set_ftrace_early_graph(char *buf, int enable)
Stefan Assmann369bc182009-10-12 22:17:21 +02004907{
4908 int ret;
4909 char *func;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09004910 struct ftrace_hash *hash;
Namhyung Kim0d7d9a12014-06-13 01:23:50 +09004911
Steven Rostedt (VMware)92ad18e2017-03-02 12:53:26 -05004912 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4913 if (WARN_ON(!hash))
4914 return;
Stefan Assmann369bc182009-10-12 22:17:21 +02004915
4916 while (buf) {
4917 func = strsep(&buf, ",");
4918 /* we allow only one expression at a time */
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09004919 ret = ftrace_graph_set_hash(hash, func);
Stefan Assmann369bc182009-10-12 22:17:21 +02004920 if (ret)
4921 printk(KERN_DEBUG "ftrace: function %s not "
4922 "traceable\n", func);
4923 }
Steven Rostedt (VMware)92ad18e2017-03-02 12:53:26 -05004924
4925 if (enable)
4926 ftrace_graph_hash = hash;
4927 else
4928 ftrace_graph_notrace_hash = hash;
Stefan Assmann369bc182009-10-12 22:17:21 +02004929}
4930#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4931
Steven Rostedt2a85a372011-12-19 21:57:44 -05004932void __init
4933ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
Steven Rostedt2af15d62009-05-28 13:37:24 -04004934{
4935 char *func;
4936
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09004937 ftrace_ops_init(ops);
4938
Steven Rostedt2af15d62009-05-28 13:37:24 -04004939 while (buf) {
4940 func = strsep(&buf, ",");
Steven Rostedtf45948e2011-05-02 12:29:25 -04004941 ftrace_set_regex(ops, func, strlen(func), 0, enable);
Steven Rostedt2af15d62009-05-28 13:37:24 -04004942 }
4943}
4944
4945static void __init set_ftrace_early_filters(void)
4946{
4947 if (ftrace_filter_buf[0])
Steven Rostedt2a85a372011-12-19 21:57:44 -05004948 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
Steven Rostedt2af15d62009-05-28 13:37:24 -04004949 if (ftrace_notrace_buf[0])
Steven Rostedt2a85a372011-12-19 21:57:44 -05004950 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
Stefan Assmann369bc182009-10-12 22:17:21 +02004951#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4952 if (ftrace_graph_buf[0])
Namhyung Kim0d7d9a12014-06-13 01:23:50 +09004953 set_ftrace_early_graph(ftrace_graph_buf, 1);
4954 if (ftrace_graph_notrace_buf[0])
4955 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
Stefan Assmann369bc182009-10-12 22:17:21 +02004956#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
Steven Rostedt2af15d62009-05-28 13:37:24 -04004957}
4958
Steven Rostedtfc13cb02011-12-19 14:41:25 -05004959int ftrace_regex_release(struct inode *inode, struct file *file)
Steven Rostedt5072c592008-05-12 21:20:43 +02004960{
4961 struct seq_file *m = (struct seq_file *)file->private_data;
4962 struct ftrace_iterator *iter;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004963 struct ftrace_hash **orig_hash;
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004964 struct trace_parser *parser;
Steven Rostedted926f92011-05-03 13:25:24 -04004965 int filter_hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004966 int ret;
Steven Rostedt5072c592008-05-12 21:20:43 +02004967
Steven Rostedt5072c592008-05-12 21:20:43 +02004968 if (file->f_mode & FMODE_READ) {
4969 iter = m->private;
Steven Rostedt5072c592008-05-12 21:20:43 +02004970 seq_release(inode, file);
4971 } else
4972 iter = file->private_data;
4973
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004974 parser = &iter->parser;
4975 if (trace_parser_loaded(parser)) {
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04004976 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
Steven Rostedt5072c592008-05-12 21:20:43 +02004977 }
4978
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004979 trace_parser_put(parser);
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004980
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04004981 mutex_lock(&iter->ops->func_hash->regex_lock);
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004982
Steven Rostedt058e2972011-04-29 22:35:33 -04004983 if (file->f_mode & FMODE_WRITE) {
Steven Rostedted926f92011-05-03 13:25:24 -04004984 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
4985
Steven Rostedt (VMware)8c08f0d2017-06-26 11:47:31 -04004986 if (filter_hash) {
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04004987 orig_hash = &iter->ops->func_hash->filter_hash;
Steven Rostedt (VMware)69d71872017-07-05 09:45:43 -04004988 if (iter->tr && !list_empty(&iter->tr->mod_trace))
Steven Rostedt (VMware)8c08f0d2017-06-26 11:47:31 -04004989 iter->hash->flags |= FTRACE_HASH_FL_MOD;
4990 } else
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04004991 orig_hash = &iter->ops->func_hash->notrace_hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004992
Steven Rostedt058e2972011-04-29 22:35:33 -04004993 mutex_lock(&ftrace_lock);
Steven Rostedt (VMware)e16b35d2017-04-04 14:46:56 -04004994 ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
4995 iter->hash, filter_hash);
Steven Rostedt058e2972011-04-29 22:35:33 -04004996 mutex_unlock(&ftrace_lock);
Steven Rostedt (VMware)c20489d2017-03-29 14:55:49 -04004997 } else {
4998 /* For read only, the hash is the ops hash */
4999 iter->hash = NULL;
Steven Rostedt058e2972011-04-29 22:35:33 -04005000 }
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09005001
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04005002 mutex_unlock(&iter->ops->func_hash->regex_lock);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04005003 free_ftrace_hash(iter->hash);
5004 kfree(iter);
Steven Rostedt058e2972011-04-29 22:35:33 -04005005
Steven Rostedt5072c592008-05-12 21:20:43 +02005006 return 0;
5007}
5008
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005009static const struct file_operations ftrace_avail_fops = {
Steven Rostedt5072c592008-05-12 21:20:43 +02005010 .open = ftrace_avail_open,
5011 .read = seq_read,
5012 .llseek = seq_lseek,
Li Zefan3be04b42009-08-17 16:54:03 +08005013 .release = seq_release_private,
Steven Rostedt5072c592008-05-12 21:20:43 +02005014};
5015
Steven Rostedt647bcd02011-05-03 14:39:21 -04005016static const struct file_operations ftrace_enabled_fops = {
5017 .open = ftrace_enabled_open,
5018 .read = seq_read,
5019 .llseek = seq_lseek,
5020 .release = seq_release_private,
5021};
5022
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005023static const struct file_operations ftrace_filter_fops = {
Steven Rostedt5072c592008-05-12 21:20:43 +02005024 .open = ftrace_filter_open,
Lai Jiangshan850a80c2009-03-13 17:47:23 +08005025 .read = seq_read,
Steven Rostedt5072c592008-05-12 21:20:43 +02005026 .write = ftrace_filter_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005027 .llseek = tracing_lseek,
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04005028 .release = ftrace_regex_release,
Steven Rostedt5072c592008-05-12 21:20:43 +02005029};
5030
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005031static const struct file_operations ftrace_notrace_fops = {
Steven Rostedt41c52c02008-05-22 11:46:33 -04005032 .open = ftrace_notrace_open,
Lai Jiangshan850a80c2009-03-13 17:47:23 +08005033 .read = seq_read,
Steven Rostedt41c52c02008-05-22 11:46:33 -04005034 .write = ftrace_notrace_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005035 .llseek = tracing_lseek,
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04005036 .release = ftrace_regex_release,
Steven Rostedt41c52c02008-05-22 11:46:33 -04005037};
5038
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005039#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5040
5041static DEFINE_MUTEX(graph_lock);
5042
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005043struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH;
5044struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH;
5045
5046enum graph_filter_type {
5047 GRAPH_FILTER_NOTRACE = 0,
5048 GRAPH_FILTER_FUNCTION,
5049};
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005050
Steven Rostedt (VMware)555fc782017-02-02 10:15:22 -05005051#define FTRACE_GRAPH_EMPTY ((void *)1)
5052
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005053struct ftrace_graph_data {
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005054 struct ftrace_hash *hash;
5055 struct ftrace_func_entry *entry;
5056 int idx; /* for hash table iteration */
5057 enum graph_filter_type type;
5058 struct ftrace_hash *new_hash;
5059 const struct seq_operations *seq_ops;
5060 struct trace_parser parser;
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005061};
5062
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005063static void *
Li Zefan85951842009-06-24 09:54:00 +08005064__g_next(struct seq_file *m, loff_t *pos)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005065{
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005066 struct ftrace_graph_data *fgd = m->private;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005067 struct ftrace_func_entry *entry = fgd->entry;
5068 struct hlist_head *head;
5069 int i, idx = fgd->idx;
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005070
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005071 if (*pos >= fgd->hash->count)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005072 return NULL;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005073
5074 if (entry) {
5075 hlist_for_each_entry_continue(entry, hlist) {
5076 fgd->entry = entry;
5077 return entry;
5078 }
5079
5080 idx++;
5081 }
5082
5083 for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
5084 head = &fgd->hash->buckets[i];
5085 hlist_for_each_entry(entry, head, hlist) {
5086 fgd->entry = entry;
5087 fgd->idx = i;
5088 return entry;
5089 }
5090 }
5091 return NULL;
Li Zefan85951842009-06-24 09:54:00 +08005092}
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005093
Li Zefan85951842009-06-24 09:54:00 +08005094static void *
5095g_next(struct seq_file *m, void *v, loff_t *pos)
5096{
5097 (*pos)++;
5098 return __g_next(m, pos);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005099}
5100
5101static void *g_start(struct seq_file *m, loff_t *pos)
5102{
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005103 struct ftrace_graph_data *fgd = m->private;
5104
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005105 mutex_lock(&graph_lock);
5106
Steven Rostedt (VMware)649b9882017-02-02 20:16:29 -05005107 if (fgd->type == GRAPH_FILTER_FUNCTION)
5108 fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
5109 lockdep_is_held(&graph_lock));
5110 else
5111 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5112 lockdep_is_held(&graph_lock));
5113
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01005114 /* Nothing found? Tell g_show to print that all functions are enabled */
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005115 if (ftrace_hash_empty(fgd->hash) && !*pos)
Steven Rostedt (VMware)555fc782017-02-02 10:15:22 -05005116 return FTRACE_GRAPH_EMPTY;
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01005117
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005118 fgd->idx = 0;
5119 fgd->entry = NULL;
Li Zefan85951842009-06-24 09:54:00 +08005120 return __g_next(m, pos);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005121}
5122
5123static void g_stop(struct seq_file *m, void *p)
5124{
5125 mutex_unlock(&graph_lock);
5126}
5127
5128static int g_show(struct seq_file *m, void *v)
5129{
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005130 struct ftrace_func_entry *entry = v;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005131
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005132 if (!entry)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005133 return 0;
5134
Steven Rostedt (VMware)555fc782017-02-02 10:15:22 -05005135 if (entry == FTRACE_GRAPH_EMPTY) {
Namhyung Kim280d1422014-06-13 01:23:51 +09005136 struct ftrace_graph_data *fgd = m->private;
5137
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005138 if (fgd->type == GRAPH_FILTER_FUNCTION)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01005139 seq_puts(m, "#### all functions enabled ####\n");
Namhyung Kim280d1422014-06-13 01:23:51 +09005140 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01005141 seq_puts(m, "#### no functions disabled ####\n");
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01005142 return 0;
5143 }
5144
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005145 seq_printf(m, "%ps\n", (void *)entry->ip);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005146
5147 return 0;
5148}
5149
James Morris88e9d342009-09-22 16:43:43 -07005150static const struct seq_operations ftrace_graph_seq_ops = {
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005151 .start = g_start,
5152 .next = g_next,
5153 .stop = g_stop,
5154 .show = g_show,
5155};
5156
5157static int
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005158__ftrace_graph_open(struct inode *inode, struct file *file,
5159 struct ftrace_graph_data *fgd)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005160{
5161 int ret = 0;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005162 struct ftrace_hash *new_hash = NULL;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005163
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005164 if (file->f_mode & FMODE_WRITE) {
5165 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
5166
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005167 if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
5168 return -ENOMEM;
5169
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005170 if (file->f_flags & O_TRUNC)
5171 new_hash = alloc_ftrace_hash(size_bits);
5172 else
5173 new_hash = alloc_and_copy_ftrace_hash(size_bits,
5174 fgd->hash);
5175 if (!new_hash) {
5176 ret = -ENOMEM;
5177 goto out;
5178 }
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005179 }
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005180
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005181 if (file->f_mode & FMODE_READ) {
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005182 ret = seq_open(file, &ftrace_graph_seq_ops);
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005183 if (!ret) {
5184 struct seq_file *m = file->private_data;
5185 m->private = fgd;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005186 } else {
5187 /* Failed */
5188 free_ftrace_hash(new_hash);
5189 new_hash = NULL;
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005190 }
5191 } else
5192 file->private_data = fgd;
Li Zefana4ec5e02009-09-18 14:06:28 +08005193
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005194out:
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005195 if (ret < 0 && file->f_mode & FMODE_WRITE)
5196 trace_parser_put(&fgd->parser);
5197
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005198 fgd->new_hash = new_hash;
Steven Rostedt (VMware)649b9882017-02-02 20:16:29 -05005199
5200 /*
5201 * All uses of fgd->hash must be taken with the graph_lock
5202 * held. The graph_lock is going to be released, so force
5203 * fgd->hash to be reinitialized when it is taken again.
5204 */
5205 fgd->hash = NULL;
5206
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005207 return ret;
5208}
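/*
 * In short: a writer gets a private new_hash (fresh on O_TRUNC, otherwise
 * a copy of the live hash) plus a trace_parser to buffer input, while a
 * reader is wired up to ftrace_graph_seq_ops. Nothing global changes
 * until release time, when the new hash is published.
 */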
5209
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005210static int
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005211ftrace_graph_open(struct inode *inode, struct file *file)
5212{
5213 struct ftrace_graph_data *fgd;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005214 int ret;
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005215
5216 if (unlikely(ftrace_disabled))
5217 return -ENODEV;
5218
5219 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
5220 if (fgd == NULL)
5221 return -ENOMEM;
5222
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005223 mutex_lock(&graph_lock);
5224
Steven Rostedt (VMware)649b9882017-02-02 20:16:29 -05005225 fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
5226 lockdep_is_held(&graph_lock));
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005227 fgd->type = GRAPH_FILTER_FUNCTION;
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005228 fgd->seq_ops = &ftrace_graph_seq_ops;
5229
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005230 ret = __ftrace_graph_open(inode, file, fgd);
5231 if (ret < 0)
5232 kfree(fgd);
5233
5234 mutex_unlock(&graph_lock);
5235 return ret;
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005236}
5237
5238static int
Namhyung Kim29ad23b2013-10-14 17:24:26 +09005239ftrace_graph_notrace_open(struct inode *inode, struct file *file)
5240{
5241 struct ftrace_graph_data *fgd;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005242 int ret;
Namhyung Kim29ad23b2013-10-14 17:24:26 +09005243
5244 if (unlikely(ftrace_disabled))
5245 return -ENODEV;
5246
5247 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
5248 if (fgd == NULL)
5249 return -ENOMEM;
5250
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005251 mutex_lock(&graph_lock);
5252
Steven Rostedt (VMware)649b9882017-02-02 20:16:29 -05005253 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5254 lockdep_is_held(&graph_lock));
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005255 fgd->type = GRAPH_FILTER_NOTRACE;
Namhyung Kim29ad23b2013-10-14 17:24:26 +09005256 fgd->seq_ops = &ftrace_graph_seq_ops;
5257
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005258 ret = __ftrace_graph_open(inode, file, fgd);
5259 if (ret < 0)
5260 kfree(fgd);
5261
5262 mutex_unlock(&graph_lock);
5263 return ret;
Namhyung Kim29ad23b2013-10-14 17:24:26 +09005264}
5265
5266static int
Li Zefan87827112009-07-23 11:29:11 +08005267ftrace_graph_release(struct inode *inode, struct file *file)
5268{
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005269 struct ftrace_graph_data *fgd;
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005270 struct ftrace_hash *old_hash, *new_hash;
5271 struct trace_parser *parser;
5272 int ret = 0;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005273
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005274 if (file->f_mode & FMODE_READ) {
5275 struct seq_file *m = file->private_data;
5276
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005277 fgd = m->private;
Li Zefan87827112009-07-23 11:29:11 +08005278 seq_release(inode, file);
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005279 } else {
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005280 fgd = file->private_data;
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005281 }
5282
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005283
5284 if (file->f_mode & FMODE_WRITE) {
5285
5286 parser = &fgd->parser;
5287
5288 if (trace_parser_loaded((parser))) {
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005289 ret = ftrace_graph_set_hash(fgd->new_hash,
5290 parser->buffer);
5291 }
5292
5293 trace_parser_put(parser);
5294
5295 new_hash = __ftrace_hash_move(fgd->new_hash);
5296 if (!new_hash) {
5297 ret = -ENOMEM;
5298 goto out;
5299 }
5300
5301 mutex_lock(&graph_lock);
5302
5303 if (fgd->type == GRAPH_FILTER_FUNCTION) {
5304 old_hash = rcu_dereference_protected(ftrace_graph_hash,
5305 lockdep_is_held(&graph_lock));
5306 rcu_assign_pointer(ftrace_graph_hash, new_hash);
5307 } else {
5308 old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5309 lockdep_is_held(&graph_lock));
5310 rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
5311 }
5312
5313 mutex_unlock(&graph_lock);
5314
5315 /* Wait till all users are no longer using the old hash */
5316 synchronize_sched();
5317
5318 free_ftrace_hash(old_hash);
5319 }
5320
5321 out:
Luis Henriquesf9797c22017-05-25 16:20:38 +01005322 free_ftrace_hash(fgd->new_hash);
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005323 kfree(fgd);
5324
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005325 return ret;
Li Zefan87827112009-07-23 11:29:11 +08005326}
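/*
 * The release path follows the classic RCU publish pattern: build the
 * replacement off to the side, swap the pointer under graph_lock with
 * rcu_assign_pointer(), wait out pre-existing readers with
 * synchronize_sched(), and only then free the old hash. A generic sketch
 * of the same idiom (names here are illustrative, not kernel API):
 *
 *	new = build_replacement();
 *	mutex_lock(&lock);
 *	old = rcu_dereference_protected(ptr, lockdep_is_held(&lock));
 *	rcu_assign_pointer(ptr, new);
 *	mutex_unlock(&lock);
 *	synchronize_sched();	/* all readers of 'old' are done */
 *	kfree(old);
 */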
5327
5328static int
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005329ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005330{
Dmitry Safonov3ba00922015-09-29 19:46:14 +03005331 struct ftrace_glob func_g;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005332 struct dyn_ftrace *rec;
5333 struct ftrace_page *pg;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005334 struct ftrace_func_entry *entry;
Li Zefanc7c6b1f2010-02-10 15:43:04 +08005335 int fail = 1;
Dmitry Safonov3ba00922015-09-29 19:46:14 +03005336 int not;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005337
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01005338 /* decode regex */
Dmitry Safonov3ba00922015-09-29 19:46:14 +03005339 func_g.type = filter_parse_regex(buffer, strlen(buffer),
5340 &func_g.search, &not);
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01005341
Dmitry Safonov3ba00922015-09-29 19:46:14 +03005342 func_g.len = strlen(func_g.search);
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01005343
Steven Rostedt52baf112009-02-14 01:15:39 -05005344 mutex_lock(&ftrace_lock);
Steven Rostedt45a4a232011-04-21 23:16:46 -04005345
5346 if (unlikely(ftrace_disabled)) {
5347 mutex_unlock(&ftrace_lock);
5348 return -ENODEV;
5349 }
5350
Steven Rostedt265c8312009-02-13 12:43:56 -05005351 do_for_each_ftrace_rec(pg, rec) {
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005352
Steven Rostedt (Red Hat)546fece2016-11-14 16:31:49 -05005353 if (rec->flags & FTRACE_FL_DISABLED)
5354 continue;
5355
Dmitry Safonov0b507e12015-09-29 19:46:15 +03005356 if (ftrace_match_record(rec, &func_g, NULL, 0)) {
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005357 entry = ftrace_lookup_ip(hash, rec->ip);
Li Zefanc7c6b1f2010-02-10 15:43:04 +08005358
5359 if (!not) {
5360 fail = 0;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005361
5362 if (entry)
5363 continue;
5364 if (add_hash_entry(hash, rec->ip) < 0)
5365 goto out;
Li Zefanc7c6b1f2010-02-10 15:43:04 +08005366 } else {
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005367 if (entry) {
5368 free_hash_entry(hash, entry);
Li Zefanc7c6b1f2010-02-10 15:43:04 +08005369 fail = 0;
5370 }
5371 }
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005372 }
Steven Rostedt265c8312009-02-13 12:43:56 -05005373 } while_for_each_ftrace_rec();
Li Zefanc7c6b1f2010-02-10 15:43:04 +08005374out:
Steven Rostedt52baf112009-02-14 01:15:39 -05005375 mutex_unlock(&ftrace_lock);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005376
Li Zefanc7c6b1f2010-02-10 15:43:04 +08005377 if (fail)
5378 return -EINVAL;
5379
Li Zefanc7c6b1f2010-02-10 15:43:04 +08005380 return 0;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005381}
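/*
 * Filter semantics: a plain pattern adds every matching record to the
 * hash, while a leading '!' (decoded into 'not' by filter_parse_regex())
 * removes matching entries instead. 'fail' is only cleared when at least
 * one record matched, so a pattern that hits nothing comes back -EINVAL.
 */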
5382
5383static ssize_t
5384ftrace_graph_write(struct file *file, const char __user *ubuf,
5385 size_t cnt, loff_t *ppos)
5386{
Namhyung Kim6a101082013-10-14 17:24:25 +09005387 ssize_t read, ret = 0;
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005388 struct ftrace_graph_data *fgd = file->private_data;
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005389 struct trace_parser *parser;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005390
Li Zefanc7c6b1f2010-02-10 15:43:04 +08005391 if (!cnt)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005392 return 0;
5393
Steven Rostedt (VMware)ae98d272017-02-02 16:59:06 -05005394 /* Read mode uses seq functions */
5395 if (file->f_mode & FMODE_READ) {
5396 struct seq_file *m = file->private_data;
5397 fgd = m->private;
5398 }
5399
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005400 parser = &fgd->parser;
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02005401
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005402 read = trace_get_user(parser, ubuf, cnt, ppos);
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02005403
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005404 if (read >= 0 && trace_parser_loaded(parser) &&
5405 !trace_parser_cont(parser)) {
Namhyung Kim6a101082013-10-14 17:24:25 +09005406
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005407 ret = ftrace_graph_set_hash(fgd->new_hash,
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005408 parser->buffer);
5409 trace_parser_clear(parser);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005410 }
5411
Namhyung Kim6a101082013-10-14 17:24:25 +09005412 if (!ret)
5413 ret = read;
Li Zefan1eb90f12009-09-22 13:52:57 +08005414
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005415 return ret;
5416}
5417
5418static const struct file_operations ftrace_graph_fops = {
Li Zefan87827112009-07-23 11:29:11 +08005419 .open = ftrace_graph_open,
5420 .read = seq_read,
5421 .write = ftrace_graph_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005422 .llseek = tracing_lseek,
Li Zefan87827112009-07-23 11:29:11 +08005423 .release = ftrace_graph_release,
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005424};
Namhyung Kim29ad23b2013-10-14 17:24:26 +09005425
5426static const struct file_operations ftrace_graph_notrace_fops = {
5427 .open = ftrace_graph_notrace_open,
5428 .read = seq_read,
5429 .write = ftrace_graph_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005430 .llseek = tracing_lseek,
Namhyung Kim29ad23b2013-10-14 17:24:26 +09005431 .release = ftrace_graph_release,
5432};
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005433#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5434
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05005435void ftrace_create_filter_files(struct ftrace_ops *ops,
5436 struct dentry *parent)
5437{
5438
5439 trace_create_file("set_ftrace_filter", 0644, parent,
5440 ops, &ftrace_filter_fops);
5441
5442 trace_create_file("set_ftrace_notrace", 0644, parent,
5443 ops, &ftrace_notrace_fops);
5444}
5445
5446/*
5447 * The name "destroy_filter_files" is really a misnomer. Although
5448 * in the future it may actually delete the files, for now it is
5449 * really intended to make sure the ops passed in are disabled
5450 * and that when this function returns, the caller is free to
5451 * free the ops.
5452 *
5453 * The "destroy" name is only to match the "create" name that this
5454 * should be paired with.
5455 */
5456void ftrace_destroy_filter_files(struct ftrace_ops *ops)
5457{
5458 mutex_lock(&ftrace_lock);
5459 if (ops->flags & FTRACE_OPS_FL_ENABLED)
5460 ftrace_shutdown(ops, 0);
5461 ops->flags |= FTRACE_OPS_FL_DELETED;
5462 mutex_unlock(&ftrace_lock);
5463}
5464
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05005465static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
Steven Rostedt5072c592008-05-12 21:20:43 +02005466{
Steven Rostedt5072c592008-05-12 21:20:43 +02005467
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005468 trace_create_file("available_filter_functions", 0444,
5469 d_tracer, NULL, &ftrace_avail_fops);
Steven Rostedt5072c592008-05-12 21:20:43 +02005470
Steven Rostedt647bcd02011-05-03 14:39:21 -04005471 trace_create_file("enabled_functions", 0444,
5472 d_tracer, NULL, &ftrace_enabled_fops);
5473
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05005474 ftrace_create_filter_files(&global_ops, d_tracer);
Steven Rostedtad90c0e2008-05-27 20:48:37 -04005475
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005476#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Chen LinX1ce05002014-09-03 14:31:09 +08005477 trace_create_file("set_graph_function", 0644, d_tracer,
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005478 NULL,
5479 &ftrace_graph_fops);
Chen LinX1ce05002014-09-03 14:31:09 +08005480 trace_create_file("set_graph_notrace", 0644, d_tracer,
Namhyung Kim29ad23b2013-10-14 17:24:26 +09005481 NULL,
5482 &ftrace_graph_notrace_fops);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005483#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5484
Steven Rostedt5072c592008-05-12 21:20:43 +02005485 return 0;
5486}
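/*
 * These control files live in the tracefs mount (typically
 * /sys/kernel/tracing, or /sys/kernel/debug/tracing on older setups).
 * An illustrative session from userspace:
 *
 *	# grep sched available_filter_functions | head
 *	# echo 'vfs_read' > set_ftrace_filter
 *	# echo '!vfs_read' >> set_ftrace_filter
 *	# echo 'schedule*' > set_graph_function
 */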
5487
Steven Rostedt9fd49322012-04-24 22:32:06 -04005488static int ftrace_cmp_ips(const void *a, const void *b)
Steven Rostedt68950612011-12-16 17:06:45 -05005489{
Steven Rostedt9fd49322012-04-24 22:32:06 -04005490 const unsigned long *ipa = a;
5491 const unsigned long *ipb = b;
Steven Rostedt68950612011-12-16 17:06:45 -05005492
Steven Rostedt9fd49322012-04-24 22:32:06 -04005493 if (*ipa > *ipb)
5494 return 1;
5495 if (*ipa < *ipb)
5496 return -1;
5497 return 0;
5498}
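/*
 * The explicit compares matter: returning '*ipa - *ipb' would truncate
 * an unsigned long difference into the comparator's int return value and
 * could report the wrong ordering for addresses far apart on 64-bit.
 */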
5499
Jiri Olsa5cb084b2009-10-13 16:33:53 -04005500static int ftrace_process_locs(struct module *mod,
Steven Rostedt31e88902008-11-14 16:21:19 -08005501 unsigned long *start,
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005502 unsigned long *end)
5503{
Steven Rostedt706c81f2012-04-24 23:45:26 -04005504 struct ftrace_page *start_pg;
Steven Rostedta7900872011-12-16 16:23:44 -05005505 struct ftrace_page *pg;
Steven Rostedt706c81f2012-04-24 23:45:26 -04005506 struct dyn_ftrace *rec;
Steven Rostedta7900872011-12-16 16:23:44 -05005507 unsigned long count;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005508 unsigned long *p;
5509 unsigned long addr;
Steven Rostedt4376cac2011-06-24 23:28:13 -04005510 unsigned long flags = 0; /* Shut up gcc */
Steven Rostedta7900872011-12-16 16:23:44 -05005511 int ret = -ENOMEM;
5512
5513 count = end - start;
5514
5515 if (!count)
5516 return 0;
5517
Steven Rostedt9fd49322012-04-24 22:32:06 -04005518 sort(start, count, sizeof(*start),
Rasmus Villemoes6db02902015-09-09 23:27:02 +02005519 ftrace_cmp_ips, NULL);
Steven Rostedt9fd49322012-04-24 22:32:06 -04005520
Steven Rostedt706c81f2012-04-24 23:45:26 -04005521 start_pg = ftrace_allocate_pages(count);
5522 if (!start_pg)
Steven Rostedta7900872011-12-16 16:23:44 -05005523 return -ENOMEM;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005524
Steven Rostedte6ea44e2009-02-14 01:42:44 -05005525 mutex_lock(&ftrace_lock);
Steven Rostedta7900872011-12-16 16:23:44 -05005526
Steven Rostedt320823092011-12-16 14:42:37 -05005527 /*
5528 * The core kernel and each module need their own pages, as
5529 * modules will free them when they are removed.
5530 * Force a new page to be allocated for modules.
5531 */
Steven Rostedta7900872011-12-16 16:23:44 -05005532 if (!mod) {
5533 WARN_ON(ftrace_pages || ftrace_pages_start);
5534 /* First initialization */
Steven Rostedt706c81f2012-04-24 23:45:26 -04005535 ftrace_pages = ftrace_pages_start = start_pg;
Steven Rostedta7900872011-12-16 16:23:44 -05005536 } else {
Steven Rostedt320823092011-12-16 14:42:37 -05005537 if (!ftrace_pages)
Steven Rostedta7900872011-12-16 16:23:44 -05005538 goto out;
Steven Rostedt320823092011-12-16 14:42:37 -05005539
Steven Rostedta7900872011-12-16 16:23:44 -05005540 if (WARN_ON(ftrace_pages->next)) {
5541 /* Hmm, we have free pages? */
5542 while (ftrace_pages->next)
5543 ftrace_pages = ftrace_pages->next;
Steven Rostedt320823092011-12-16 14:42:37 -05005544 }
Steven Rostedta7900872011-12-16 16:23:44 -05005545
Steven Rostedt706c81f2012-04-24 23:45:26 -04005546 ftrace_pages->next = start_pg;
Steven Rostedt320823092011-12-16 14:42:37 -05005547 }
5548
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005549 p = start;
Steven Rostedt706c81f2012-04-24 23:45:26 -04005550 pg = start_pg;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005551 while (p < end) {
5552 addr = ftrace_call_adjust(*p++);
Steven Rostedt20e52272008-11-14 16:21:19 -08005553 /*
5554 * Some architecture linkers will pad between
5555 * the different mcount_loc sections of different
5556 * object files to satisfy alignments.
5557 * Skip any NULL pointers.
5558 */
5559 if (!addr)
5560 continue;
Steven Rostedt706c81f2012-04-24 23:45:26 -04005561
5562 if (pg->index == pg->size) {
5563 /* We should have allocated enough */
5564 if (WARN_ON(!pg->next))
5565 break;
5566 pg = pg->next;
5567 }
5568
5569 rec = &pg->records[pg->index++];
5570 rec->ip = addr;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005571 }
5572
Steven Rostedt706c81f2012-04-24 23:45:26 -04005573 /* We should have used all pages */
5574 WARN_ON(pg->next);
5575
5576 /* Assign the last page to ftrace_pages */
5577 ftrace_pages = pg;
5578
Steven Rostedta4f18ed2011-06-07 09:26:46 -04005579 /*
Steven Rostedt4376cac2011-06-24 23:28:13 -04005580 * We only need to disable interrupts on start up
5581 * because we are modifying code that an interrupt
5582 * may execute, and the modification is not atomic.
5583 * But for modules, nothing runs the code we modify
5584 * until we are finished with it, and there's no
5585 * reason to cause large interrupt latencies while we do it.
Steven Rostedta4f18ed2011-06-07 09:26:46 -04005586 */
Steven Rostedt4376cac2011-06-24 23:28:13 -04005587 if (!mod)
5588 local_irq_save(flags);
Jiri Slaby1dc43cf2014-02-24 19:59:56 +01005589 ftrace_update_code(mod, start_pg);
Steven Rostedt4376cac2011-06-24 23:28:13 -04005590 if (!mod)
5591 local_irq_restore(flags);
Steven Rostedta7900872011-12-16 16:23:44 -05005592 ret = 0;
5593 out:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05005594 mutex_unlock(&ftrace_lock);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005595
Steven Rostedta7900872011-12-16 16:23:44 -05005596 return ret;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005597}
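/*
 * ftrace_process_locs() is the one funnel for both boot
 * (__start_mcount_loc..__stop_mcount_loc) and module load
 * (mod->ftrace_callsites): sort the raw mcount addresses, bulk-allocate
 * record pages, copy in each non-NULL call site, then let
 * ftrace_update_code() convert the compiler-emitted calls to NOPs.
 */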
5598
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04005599struct ftrace_mod_func {
5600 struct list_head list;
5601 char *name;
5602 unsigned long ip;
5603 unsigned int size;
5604};
5605
5606struct ftrace_mod_map {
Steven Rostedt (VMware)6aa69782017-09-05 19:20:16 -04005607 struct rcu_head rcu;
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04005608 struct list_head list;
5609 struct module *mod;
5610 unsigned long start_addr;
5611 unsigned long end_addr;
5612 struct list_head funcs;
Steven Rostedt (VMware)6171a032017-09-06 08:40:41 -04005613 unsigned int num_funcs;
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04005614};
5615
Steven Rostedt93eb6772009-04-15 13:24:06 -04005616#ifdef CONFIG_MODULES
Steven Rostedt320823092011-12-16 14:42:37 -05005617
5618#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
5619
Steven Rostedt (VMware)6aa69782017-09-05 19:20:16 -04005620static LIST_HEAD(ftrace_mod_maps);
5621
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05005622static int referenced_filters(struct dyn_ftrace *rec)
5623{
5624 struct ftrace_ops *ops;
5625 int cnt = 0;
5626
5627 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
5628 if (ops_references_rec(ops, rec))
5629 cnt++;
5630 }
5631
5632 return cnt;
5633}
5634
Steven Rostedt (VMware)2a5bfe42017-08-31 17:36:51 -04005635static void
5636clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
5637{
5638 struct ftrace_func_entry *entry;
5639 struct dyn_ftrace *rec;
5640 int i;
5641
5642 if (ftrace_hash_empty(hash))
5643 return;
5644
5645 for (i = 0; i < pg->index; i++) {
5646 rec = &pg->records[i];
5647 entry = __ftrace_lookup_ip(hash, rec->ip);
5648 /*
5649 * Do not allow this rec to match again.
5650 * Yeah, it may waste some memory, but will be removed
5651 * if/when the hash is modified again.
5652 */
5653 if (entry)
5654 entry->ip = 0;
5655 }
5656}
5657
5658/* Clear any records from hashes */
5659static void clear_mod_from_hashes(struct ftrace_page *pg)
5660{
5661 struct trace_array *tr;
5662
5663 mutex_lock(&trace_types_lock);
5664 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
5665 if (!tr->ops || !tr->ops->func_hash)
5666 continue;
5667 mutex_lock(&tr->ops->func_hash->regex_lock);
5668 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
5669 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
5670 mutex_unlock(&tr->ops->func_hash->regex_lock);
5671 }
5672 mutex_unlock(&trace_types_lock);
5673}
5674
Steven Rostedt (VMware)6aa69782017-09-05 19:20:16 -04005675static void ftrace_free_mod_map(struct rcu_head *rcu)
5676{
5677 struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
5678 struct ftrace_mod_func *mod_func;
5679 struct ftrace_mod_func *n;
5680
5681	/* All the contents of mod_map are no longer visible to readers */
5682 list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
5683 kfree(mod_func->name);
5684 list_del(&mod_func->list);
5685 kfree(mod_func);
5686 }
5687
5688 kfree(mod_map);
5689}
5690
jolsa@redhat.come7247a12009-10-07 19:00:35 +02005691void ftrace_release_mod(struct module *mod)
Steven Rostedt93eb6772009-04-15 13:24:06 -04005692{
Steven Rostedt (VMware)6aa69782017-09-05 19:20:16 -04005693 struct ftrace_mod_map *mod_map;
5694 struct ftrace_mod_map *n;
Steven Rostedt93eb6772009-04-15 13:24:06 -04005695 struct dyn_ftrace *rec;
Steven Rostedt320823092011-12-16 14:42:37 -05005696 struct ftrace_page **last_pg;
Steven Rostedt (VMware)2a5bfe42017-08-31 17:36:51 -04005697 struct ftrace_page *tmp_page = NULL;
Steven Rostedt93eb6772009-04-15 13:24:06 -04005698 struct ftrace_page *pg;
Steven Rostedta7900872011-12-16 16:23:44 -05005699 int order;
Steven Rostedt93eb6772009-04-15 13:24:06 -04005700
Steven Rostedt93eb6772009-04-15 13:24:06 -04005701 mutex_lock(&ftrace_lock);
Steven Rostedt45a4a232011-04-21 23:16:46 -04005702
5703 if (ftrace_disabled)
5704 goto out_unlock;
5705
Steven Rostedt (VMware)6aa69782017-09-05 19:20:16 -04005706 list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
5707 if (mod_map->mod == mod) {
5708 list_del_rcu(&mod_map->list);
5709 call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map);
5710 break;
5711 }
5712 }
5713
Steven Rostedt320823092011-12-16 14:42:37 -05005714 /*
5715 * Each module has its own ftrace_pages, remove
5716 * them from the list.
5717 */
5718 last_pg = &ftrace_pages_start;
5719 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
5720 rec = &pg->records[0];
Steven Rostedt (VMware)3e234282017-03-03 18:00:22 -05005721 if (within_module_core(rec->ip, mod) ||
5722 within_module_init(rec->ip, mod)) {
Steven Rostedt93eb6772009-04-15 13:24:06 -04005723 /*
Steven Rostedt320823092011-12-16 14:42:37 -05005724 * As core pages are first, the first
5725 * page should never be a module page.
Steven Rostedt93eb6772009-04-15 13:24:06 -04005726 */
Steven Rostedt320823092011-12-16 14:42:37 -05005727 if (WARN_ON(pg == ftrace_pages_start))
5728 goto out_unlock;
5729
5730 /* Check if we are deleting the last page */
5731 if (pg == ftrace_pages)
5732 ftrace_pages = next_to_ftrace_page(last_pg);
5733
Steven Rostedt (VMware)83dd1492017-06-27 11:04:40 -04005734 ftrace_update_tot_cnt -= pg->index;
Steven Rostedt320823092011-12-16 14:42:37 -05005735 *last_pg = pg->next;
Steven Rostedt (VMware)2a5bfe42017-08-31 17:36:51 -04005736
5737 pg->next = tmp_page;
5738 tmp_page = pg;
Steven Rostedt320823092011-12-16 14:42:37 -05005739 } else
5740 last_pg = &pg->next;
5741 }
Steven Rostedt45a4a232011-04-21 23:16:46 -04005742 out_unlock:
Steven Rostedt93eb6772009-04-15 13:24:06 -04005743 mutex_unlock(&ftrace_lock);
Steven Rostedt (VMware)2a5bfe42017-08-31 17:36:51 -04005744
5745 for (pg = tmp_page; pg; pg = tmp_page) {
5746
5747 /* Needs to be called outside of ftrace_lock */
5748 clear_mod_from_hashes(pg);
5749
5750 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
5751 free_pages((unsigned long)pg->records, order);
5752 tmp_page = pg->next;
5753 kfree(pg);
5754 }
Steven Rostedt93eb6772009-04-15 13:24:06 -04005755}
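/*
 * Unloading is deliberately two-phase: the module's record pages are
 * unlinked (and any mod_map queued for RCU freeing) while ftrace_lock is
 * held, but scrubbing the stale ips from the filter hashes and freeing
 * the pages happens afterwards; as noted above, clear_mod_from_hashes()
 * must run outside ftrace_lock since it takes trace_types_lock and the
 * per-ops regex_lock.
 */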
5756
Jessica Yu7dcd1822016-02-16 17:32:33 -05005757void ftrace_module_enable(struct module *mod)
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05005758{
5759 struct dyn_ftrace *rec;
5760 struct ftrace_page *pg;
5761
5762 mutex_lock(&ftrace_lock);
5763
5764 if (ftrace_disabled)
5765 goto out_unlock;
5766
5767 /*
5768 * If the tracing is enabled, go ahead and enable the record.
5769 *
5770 * The reason not to enable the record immediately is the
5771 * inherent check of ftrace_make_nop/ftrace_make_call for
5772 * correct previous instructions. Doing the NOP conversion
5773 * first puts the module into the correct state, thus
5774 * passing the ftrace_make_call check.
5775 *
5776 * We also delay this to after the module code already set the
5777 * text to read-only, as we now need to set it back to read-write
5778 * so that we can modify the text.
5779 */
5780 if (ftrace_start_up)
5781 ftrace_arch_code_modify_prepare();
5782
5783 do_for_each_ftrace_rec(pg, rec) {
5784 int cnt;
5785 /*
5786 * do_for_each_ftrace_rec() is a double loop.
5787 * Module text shares the pg. If a record is
5788 * not part of this module, then skip this pg,
5789 * which the "break" will do.
5790 */
Steven Rostedt (VMware)3e234282017-03-03 18:00:22 -05005791 if (!within_module_core(rec->ip, mod) &&
5792 !within_module_init(rec->ip, mod))
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05005793 break;
5794
5795 cnt = 0;
5796
5797 /*
5798 * When adding a module, we need to check if tracers are
5799 * currently enabled and if they are, and can trace this record,
5800 * we need to enable the module functions as well as update the
5801 * reference counts for those function records.
5802 */
5803 if (ftrace_start_up)
5804 cnt += referenced_filters(rec);
5805
5806 /* This clears FTRACE_FL_DISABLED */
5807 rec->flags = cnt;
5808
5809 if (ftrace_start_up && cnt) {
5810 int failed = __ftrace_replace_code(rec, 1);
5811 if (failed) {
5812 ftrace_bug(failed, rec);
5813 goto out_loop;
5814 }
5815 }
5816
5817 } while_for_each_ftrace_rec();
5818
5819 out_loop:
5820 if (ftrace_start_up)
5821 ftrace_arch_code_modify_post_process();
5822
5823 out_unlock:
5824 mutex_unlock(&ftrace_lock);
Steven Rostedt (VMware)d7fbf8d2017-06-26 10:57:21 -04005825
5826 process_cached_mods(mod->name);
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05005827}
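/*
 * By the time this runs, ftrace_module_init() has already turned every
 * mcount call site in the module into a NOP. Here, records that an
 * active filter references are counted and flipped back to calls via
 * __ftrace_replace_code(rec, 1), bracketed by the arch hooks that make
 * the now read-only module text writable for patching.
 */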
5828
Steven Rostedt (Red Hat)a949ae52014-04-24 10:40:12 -04005829void ftrace_module_init(struct module *mod)
Steven Rostedt93eb6772009-04-15 13:24:06 -04005830{
Steven Rostedt (Red Hat)97e9b4f2015-12-23 12:12:22 -05005831 if (ftrace_disabled || !mod->num_ftrace_callsites)
Abel Vesab6b71f62015-12-02 15:39:57 +01005832 return;
5833
Steven Rostedt (Red Hat)97e9b4f2015-12-23 12:12:22 -05005834 ftrace_process_locs(mod, mod->ftrace_callsites,
5835 mod->ftrace_callsites + mod->num_ftrace_callsites);
Steven Rostedt (Red Hat)8c189ea2013-02-13 15:18:38 -05005836}
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04005837
5838static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
5839 struct dyn_ftrace *rec)
5840{
5841 struct ftrace_mod_func *mod_func;
5842 unsigned long symsize;
5843 unsigned long offset;
5844 char str[KSYM_SYMBOL_LEN];
5845 char *modname;
5846 const char *ret;
5847
5848 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
5849 if (!ret)
5850 return;
5851
5852 mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
5853 if (!mod_func)
5854 return;
5855
5856 mod_func->name = kstrdup(str, GFP_KERNEL);
5857 if (!mod_func->name) {
5858 kfree(mod_func);
5859 return;
5860 }
5861
5862 mod_func->ip = rec->ip - offset;
5863 mod_func->size = symsize;
5864
Steven Rostedt (VMware)6171a032017-09-06 08:40:41 -04005865 mod_map->num_funcs++;
5866
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04005867 list_add_rcu(&mod_func->list, &mod_map->funcs);
5868}
5869
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04005870static struct ftrace_mod_map *
5871allocate_ftrace_mod_map(struct module *mod,
5872 unsigned long start, unsigned long end)
5873{
5874 struct ftrace_mod_map *mod_map;
5875
5876 mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
5877 if (!mod_map)
5878 return NULL;
5879
5880 mod_map->mod = mod;
5881 mod_map->start_addr = start;
5882 mod_map->end_addr = end;
Steven Rostedt (VMware)6171a032017-09-06 08:40:41 -04005883 mod_map->num_funcs = 0;
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04005884
5885 INIT_LIST_HEAD_RCU(&mod_map->funcs);
5886
5887 list_add_rcu(&mod_map->list, &ftrace_mod_maps);
5888
5889 return mod_map;
5890}
5891
5892static const char *
5893ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
5894 unsigned long addr, unsigned long *size,
5895 unsigned long *off, char *sym)
5896{
5897 struct ftrace_mod_func *found_func = NULL;
5898 struct ftrace_mod_func *mod_func;
5899
5900 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
5901 if (addr >= mod_func->ip &&
5902 addr < mod_func->ip + mod_func->size) {
5903 found_func = mod_func;
5904 break;
5905 }
5906 }
5907
5908 if (found_func) {
5909 if (size)
5910 *size = found_func->size;
5911 if (off)
5912 *off = addr - found_func->ip;
5913 if (sym)
5914 strlcpy(sym, found_func->name, KSYM_NAME_LEN);
5915
5916 return found_func->name;
5917 }
5918
5919 return NULL;
5920}
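/*
 * This lookup is what keeps kallsyms meaningful for freed module init
 * text: the ftrace_mod_map saved at free time is scanned linearly for an
 * address-range hit, standing in for the symbol table that went away
 * with the init sections.
 */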
5921
5922const char *
5923ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
5924 unsigned long *off, char **modname, char *sym)
5925{
5926 struct ftrace_mod_map *mod_map;
5927 const char *ret = NULL;
5928
Steven Rostedt (VMware)6aa69782017-09-05 19:20:16 -04005929 /* mod_map is freed via call_rcu_sched() */
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04005930 preempt_disable();
5931 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
5932 ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
5933 if (ret) {
5934 if (modname)
5935 *modname = mod_map->mod->name;
5936 break;
5937 }
5938 }
5939 preempt_enable();
5940
5941 return ret;
5942}
5943
Steven Rostedt (VMware)6171a032017-09-06 08:40:41 -04005944int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
5945 char *type, char *name,
5946 char *module_name, int *exported)
5947{
5948 struct ftrace_mod_map *mod_map;
5949 struct ftrace_mod_func *mod_func;
5950
5951 preempt_disable();
5952 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
5953
5954 if (symnum >= mod_map->num_funcs) {
5955 symnum -= mod_map->num_funcs;
5956 continue;
5957 }
5958
5959 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
5960 if (symnum > 1) {
5961 symnum--;
5962 continue;
5963 }
5964
5965 *value = mod_func->ip;
5966 *type = 'T';
5967 strlcpy(name, mod_func->name, KSYM_NAME_LEN);
5968 strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
5969 *exported = 1;
5970 preempt_enable();
5971 return 0;
5972 }
5973 WARN_ON(1);
5974 break;
5975 }
5976 preempt_enable();
5977 return -ERANGE;
5978}
5979
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04005980#else
5981static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
5982 struct dyn_ftrace *rec) { }
5983static inline struct ftrace_mod_map *
5984allocate_ftrace_mod_map(struct module *mod,
5985 unsigned long start, unsigned long end)
5986{
5987 return NULL;
5988}
Steven Rostedt93eb6772009-04-15 13:24:06 -04005989#endif /* CONFIG_MODULES */
5990
Joel Fernandes8715b102017-10-09 12:29:31 -07005991struct ftrace_init_func {
5992 struct list_head list;
5993 unsigned long ip;
5994};
5995
5996/* Clear any init ips from hashes */
5997static void
5998clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
Steven Rostedt (VMware)42c269c2017-03-03 16:15:39 -05005999{
Joel Fernandes8715b102017-10-09 12:29:31 -07006000 struct ftrace_func_entry *entry;
6001
6002 if (ftrace_hash_empty(hash))
6003 return;
6004
6005 entry = __ftrace_lookup_ip(hash, func->ip);
6006
6007 /*
6008 * Do not allow this rec to match again.
6009 * Yeah, it may waste some memory, but will be removed
6010 * if/when the hash is modified again.
6011 */
6012 if (entry)
6013 entry->ip = 0;
6014}
6015
6016static void
6017clear_func_from_hashes(struct ftrace_init_func *func)
6018{
6019 struct trace_array *tr;
6020
6021 mutex_lock(&trace_types_lock);
6022 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6023 if (!tr->ops || !tr->ops->func_hash)
6024 continue;
6025 mutex_lock(&tr->ops->func_hash->regex_lock);
6026 clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
6027 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
6028 mutex_unlock(&tr->ops->func_hash->regex_lock);
6029 }
6030 mutex_unlock(&trace_types_lock);
6031}
6032
6033static void add_to_clear_hash_list(struct list_head *clear_list,
6034 struct dyn_ftrace *rec)
6035{
6036 struct ftrace_init_func *func;
6037
6038 func = kmalloc(sizeof(*func), GFP_KERNEL);
6039 if (!func) {
6040 WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n");
6041 return;
6042 }
6043
6044 func->ip = rec->ip;
6045 list_add(&func->list, clear_list);
6046}
6047
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04006048void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
Steven Rostedt (VMware)42c269c2017-03-03 16:15:39 -05006049{
Steven Rostedt (VMware)6cafbe12017-06-20 10:44:58 -04006050 unsigned long start = (unsigned long)(start_ptr);
6051 unsigned long end = (unsigned long)(end_ptr);
Steven Rostedt (VMware)42c269c2017-03-03 16:15:39 -05006052 struct ftrace_page **last_pg = &ftrace_pages_start;
6053 struct ftrace_page *pg;
6054 struct dyn_ftrace *rec;
6055 struct dyn_ftrace key;
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04006056 struct ftrace_mod_map *mod_map = NULL;
Joel Fernandes8715b102017-10-09 12:29:31 -07006057 struct ftrace_init_func *func, *func_next;
6058 struct list_head clear_hash;
Steven Rostedt (VMware)42c269c2017-03-03 16:15:39 -05006059 int order;
6060
Joel Fernandes8715b102017-10-09 12:29:31 -07006061 INIT_LIST_HEAD(&clear_hash);
6062
Steven Rostedt (VMware)42c269c2017-03-03 16:15:39 -05006063 key.ip = start;
6064 key.flags = end; /* overload flags, as it is unsigned long */
6065
6066 mutex_lock(&ftrace_lock);
6067
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04006068 /*
6069 * If we are freeing module init memory, then check if
6070 * any tracer is active. If so, we need to save a mapping from
6071 * the addresses being freed to the module function names.
6072 */
6073 if (mod && ftrace_ops_list != &ftrace_list_end)
6074 mod_map = allocate_ftrace_mod_map(mod, start, end);
6075
Steven Rostedt (VMware)42c269c2017-03-03 16:15:39 -05006076 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
6077 if (end < pg->records[0].ip ||
6078 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
6079 continue;
6080 again:
6081 rec = bsearch(&key, pg->records, pg->index,
6082 sizeof(struct dyn_ftrace),
6083 ftrace_cmp_recs);
6084 if (!rec)
6085 continue;
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04006086
Joel Fernandes8715b102017-10-09 12:29:31 -07006087 /* rec will be cleared from hashes after ftrace_lock unlock */
6088 add_to_clear_hash_list(&clear_hash, rec);
6089
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04006090 if (mod_map)
6091 save_ftrace_mod_rec(mod_map, rec);
6092
Steven Rostedt (VMware)42c269c2017-03-03 16:15:39 -05006093 pg->index--;
Steven Rostedt (VMware)4ec78462017-06-28 11:57:03 -04006094 ftrace_update_tot_cnt--;
Steven Rostedt (VMware)42c269c2017-03-03 16:15:39 -05006095 if (!pg->index) {
6096 *last_pg = pg->next;
6097 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
6098 free_pages((unsigned long)pg->records, order);
6099 kfree(pg);
6100 pg = container_of(last_pg, struct ftrace_page, next);
6101 if (!(*last_pg))
6102 ftrace_pages = pg;
6103 continue;
6104 }
6105 memmove(rec, rec + 1,
6106 (pg->index - (rec - pg->records)) * sizeof(*rec));
6107 /* More than one function may be in this block */
6108 goto again;
6109 }
6110 mutex_unlock(&ftrace_lock);
Joel Fernandes8715b102017-10-09 12:29:31 -07006111
6112 list_for_each_entry_safe(func, func_next, &clear_hash, list) {
6113 clear_func_from_hashes(func);
6114 kfree(func);
6115 }
Steven Rostedt (VMware)42c269c2017-03-03 16:15:39 -05006116}
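/*
 * Records inside [start_ptr, end_ptr) are found with bsearch() and
 * removed by memmove()-ing the rest of the page down one slot; the
 * "goto again" re-searches the same page since several functions may sit
 * in the freed range. As with module unload, the stale ips are scrubbed
 * from the filter hashes only after ftrace_lock is dropped.
 */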
6117
Steven Rostedt (VMware)6cafbe12017-06-20 10:44:58 -04006118void __init ftrace_free_init_mem(void)
6119{
6120 void *start = (void *)(&__init_begin);
6121 void *end = (void *)(&__init_end);
6122
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04006123 ftrace_free_mem(NULL, start, end);
Steven Rostedt93eb6772009-04-15 13:24:06 -04006124}
6125
Steven Rostedt68bf21a2008-08-14 15:45:08 -04006126void __init ftrace_init(void)
6127{
Jiri Slaby1dc43cf2014-02-24 19:59:56 +01006128 extern unsigned long __start_mcount_loc[];
6129 extern unsigned long __stop_mcount_loc[];
Jiri Slaby3a36cb12014-02-24 19:59:59 +01006130 unsigned long count, flags;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04006131 int ret;
6132
Steven Rostedt68bf21a2008-08-14 15:45:08 -04006133 local_irq_save(flags);
Jiri Slaby3a36cb12014-02-24 19:59:59 +01006134 ret = ftrace_dyn_arch_init();
Steven Rostedt68bf21a2008-08-14 15:45:08 -04006135 local_irq_restore(flags);
Jiri Slabyaf64a7c2014-02-24 19:59:58 +01006136 if (ret)
Steven Rostedt68bf21a2008-08-14 15:45:08 -04006137 goto failed;
6138
6139 count = __stop_mcount_loc - __start_mcount_loc;
Jiri Slabyc867ccd2014-02-24 19:59:57 +01006140 if (!count) {
6141 pr_info("ftrace: No functions to be traced?\n");
Steven Rostedt68bf21a2008-08-14 15:45:08 -04006142 goto failed;
Jiri Slabyc867ccd2014-02-24 19:59:57 +01006143 }
6144
6145 pr_info("ftrace: allocating %ld entries in %ld pages\n",
6146 count, count / ENTRIES_PER_PAGE + 1);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04006147
6148 last_ftrace_enabled = ftrace_enabled = 1;
6149
Jiri Olsa5cb084b2009-10-13 16:33:53 -04006150 ret = ftrace_process_locs(NULL,
Steven Rostedt31e88902008-11-14 16:21:19 -08006151 __start_mcount_loc,
Steven Rostedt68bf21a2008-08-14 15:45:08 -04006152 __stop_mcount_loc);
6153
Steven Rostedt2af15d62009-05-28 13:37:24 -04006154 set_ftrace_early_filters();
6155
Steven Rostedt68bf21a2008-08-14 15:45:08 -04006156 return;
6157 failed:
6158 ftrace_disabled = 1;
6159}
Steven Rostedt68bf21a2008-08-14 15:45:08 -04006160
Steven Rostedt (Red Hat)f3bea492014-07-02 23:23:31 -04006161/* Do nothing if arch does not support this */
6162void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
6163{
6164}
6165
6166static void ftrace_update_trampoline(struct ftrace_ops *ops)
6167{
Steven Rostedt (Red Hat)f3bea492014-07-02 23:23:31 -04006168 arch_ftrace_update_trampoline(ops);
6169}
6170
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04006171void ftrace_init_trace_array(struct trace_array *tr)
6172{
6173 INIT_LIST_HEAD(&tr->func_probes);
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04006174 INIT_LIST_HEAD(&tr->mod_trace);
6175 INIT_LIST_HEAD(&tr->mod_notrace);
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04006176}
Steven Rostedt3d083392008-05-12 21:20:42 +02006177#else
Frederic Weisbecker0b6e4d52008-10-28 20:17:38 +01006178
Steven Rostedt2b499382011-05-03 22:49:52 -04006179static struct ftrace_ops global_ops = {
Steven Rostedtbd69c302011-05-03 21:55:54 -04006180 .func = ftrace_stub,
Steven Rostedt (Red Hat)e3eea142015-07-24 10:38:12 -04006181 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
6182 FTRACE_OPS_FL_INITIALIZED |
6183 FTRACE_OPS_FL_PID,
Steven Rostedtbd69c302011-05-03 21:55:54 -04006184};
6185
Frederic Weisbecker0b6e4d52008-10-28 20:17:38 +01006186static int __init ftrace_nodyn_init(void)
6187{
6188 ftrace_enabled = 1;
6189 return 0;
6190}
Steven Rostedt6f415672012-10-05 12:13:07 -04006191core_initcall(ftrace_nodyn_init);
Frederic Weisbecker0b6e4d52008-10-28 20:17:38 +01006192
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006193static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
Steven Rostedtdf4fc312008-11-26 00:16:23 -05006194static inline void ftrace_startup_enable(int command) { }
Steven Rostedt (Red Hat)e1effa02014-08-05 17:19:38 -04006195static inline void ftrace_startup_all(int command) { }
Steven Rostedt5a45cfe2008-11-26 00:16:24 -05006196/* Keep as macros so we do not need to define the commands */
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05006197# define ftrace_startup(ops, command) \
6198 ({ \
6199 int ___ret = __register_ftrace_function(ops); \
6200 if (!___ret) \
6201 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
6202 ___ret; \
Steven Rostedt3b6cfdb2011-05-23 15:33:49 -04006203 })
Steven Rostedt (Red Hat)1fcc1552014-02-19 15:12:18 -05006204# define ftrace_shutdown(ops, command) \
6205 ({ \
6206 int ___ret = __unregister_ftrace_function(ops); \
6207 if (!___ret) \
6208 (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \
6209 ___ret; \
6210 })
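/*
 * Without CONFIG_DYNAMIC_FTRACE there is no record database and no text
 * patching, so startup/shutdown reduce to registering or unregistering
 * the ops and toggling FTRACE_OPS_FL_ENABLED.
 */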
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05006211
Ingo Molnarc7aafc52008-05-12 21:20:45 +02006212# define ftrace_startup_sysctl() do { } while (0)
6213# define ftrace_shutdown_sysctl() do { } while (0)
Steven Rostedtb8489142011-05-04 09:27:52 -04006214
6215static inline int
Steven Rostedt (Red Hat)195a8af2013-07-23 22:06:15 -04006216ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
Steven Rostedtb8489142011-05-04 09:27:52 -04006217{
6218 return 1;
6219}
6220
Steven Rostedt (Red Hat)f3bea492014-07-02 23:23:31 -04006221static void ftrace_update_trampoline(struct ftrace_ops *ops)
6222{
6223}
6224
Steven Rostedt3d083392008-05-12 21:20:42 +02006225#endif /* CONFIG_DYNAMIC_FTRACE */
6226
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05006227__init void ftrace_init_global_array_ops(struct trace_array *tr)
6228{
6229 tr->ops = &global_ops;
6230 tr->ops->private = tr;
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04006231 ftrace_init_trace_array(tr);
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05006232}
6233
6234void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
6235{
6236 /* If we filter on pids, update to use the pid function */
6237 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
6238 if (WARN_ON(tr->ops->func != ftrace_stub))
6239 printk("ftrace ops had %pS for function\n",
6240 tr->ops->func);
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05006241 }
6242 tr->ops->func = func;
6243 tr->ops->private = tr;
6244}
6245
6246void ftrace_reset_array_ops(struct trace_array *tr)
6247{
6248 tr->ops->func = ftrace_stub;
6249}
6250
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04006251static inline void
6252__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
Steven Rostedta1e2e312011-08-09 12:50:46 -04006253 struct ftrace_ops *ignored, struct pt_regs *regs)
Steven Rostedtb8489142011-05-04 09:27:52 -04006254{
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04006255 struct ftrace_ops *op;
Steven Rostedtedc15ca2012-11-02 17:47:21 -04006256 int bit;
Steven Rostedtb8489142011-05-04 09:27:52 -04006257
Steven Rostedtedc15ca2012-11-02 17:47:21 -04006258 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
6259 if (bit < 0)
6260 return;
Steven Rostedtc29f1222012-11-02 17:17:59 -04006261
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04006262 /*
6263 * Some of the ops may be dynamically allocated,
6264 * they must be freed after a synchronize_sched().
6265 */
6266 preempt_disable_notrace();
Steven Rostedt (Red Hat)ba27f2b2015-11-30 17:23:39 -05006267
Steven Rostedt0a016402012-11-02 17:03:03 -04006268 do_for_each_ftrace_op(op, ftrace_ops_list) {
Steven Rostedt (Red Hat)ba27f2b2015-11-30 17:23:39 -05006269 /*
6270 * Check the following for each ops before calling their func:
6271 * if RCU flag is set, then rcu_is_watching() must be true
6272 * if PER_CPU is set, then ftrace_function_local_disable()
6273 * must be false
6274 * Otherwise test if the ip matches the ops filter
6275 *
6276 * If any of the above fails then the op->func() is not executed.
6277 */
6278 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
Steven Rostedt (Red Hat)ba27f2b2015-11-30 17:23:39 -05006279 ftrace_ops_test(op, ip, regs)) {
Steven Rostedt (Red Hat)1d48d592014-06-25 11:54:03 -04006280 if (FTRACE_WARN_ON(!op->func)) {
6281 pr_warn("op=%p %pS\n", op, op);
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05006282 goto out;
6283 }
Steven Rostedta1e2e312011-08-09 12:50:46 -04006284 op->func(ip, parent_ip, op, regs);
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05006285 }
Steven Rostedt0a016402012-11-02 17:03:03 -04006286 } while_for_each_ftrace_op(op);
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05006287out:
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04006288 preempt_enable_notrace();
Steven Rostedtedc15ca2012-11-02 17:47:21 -04006289 trace_clear_recursion(bit);
Steven Rostedtb8489142011-05-04 09:27:52 -04006290}
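/*
 * Two protections stack up in the list walker above: the recursion bit
 * keeps it from re-entering itself if something it calls is also traced,
 * and preempt_disable_notrace() acts as the RCU-sched read side, pairing
 * with the synchronize_sched() that dynamically allocated ops wait on
 * before being freed.
 */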
6291
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04006292/*
6293 * Some archs only support passing ip and parent_ip. Even though
6294 * the list function ignores the op parameter, we do not want any
6295 * C side effects, where a function is called without the caller
6296 * sending a third parameter.
Steven Rostedta1e2e312011-08-09 12:50:46 -04006297 * Archs are to support both the regs and ftrace_ops at the same time.
6298 * If they support ftrace_ops, it is assumed they support regs.
6299 * If callbacks want to use regs, they must either check for regs
Masami Hiramatsu06aeaae2012-09-28 17:15:17 +09006300 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
6301 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
Steven Rostedta1e2e312011-08-09 12:50:46 -04006302 * An architecture can pass partial regs with ftrace_ops and still
Li Binb8ec3302015-11-30 18:23:36 +08006303 * set the ARCH_SUPPORTS_FTRACE_OPS.
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04006304 */
6305#if ARCH_SUPPORTS_FTRACE_OPS
6306static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
Steven Rostedta1e2e312011-08-09 12:50:46 -04006307 struct ftrace_ops *op, struct pt_regs *regs)
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04006308{
Steven Rostedta1e2e312011-08-09 12:50:46 -04006309 __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04006310}
6311#else
6312static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
6313{
Steven Rostedta1e2e312011-08-09 12:50:46 -04006314 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04006315}
6316#endif
6317
Steven Rostedt (Red Hat)f1ff6342014-07-22 20:16:57 -04006318/*
6319 * If there's only one function registered but it does not support
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05006320 * recursion, needs RCU protection and/or requires per cpu handling, then
6321 * this function will be called by the mcount trampoline.
Steven Rostedt (Red Hat)f1ff6342014-07-22 20:16:57 -04006322 */
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05006323static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (Red Hat)f1ff6342014-07-22 20:16:57 -04006324 struct ftrace_ops *op, struct pt_regs *regs)
6325{
6326 int bit;
6327
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05006328 if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
6329 return;
6330
Steven Rostedt (Red Hat)f1ff6342014-07-22 20:16:57 -04006331 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
6332 if (bit < 0)
6333 return;
6334
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05006335 preempt_disable_notrace();
Steven Rostedt (Red Hat)f1ff6342014-07-22 20:16:57 -04006336
Peter Zijlstrab3a88802017-10-11 09:45:32 +02006337 op->func(ip, parent_ip, op, regs);
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05006338
6339 preempt_enable_notrace();
Steven Rostedt (Red Hat)f1ff6342014-07-22 20:16:57 -04006340 trace_clear_recursion(bit);
6341}
6342
Steven Rostedt (Red Hat)87354052014-07-22 20:41:42 -04006343/**
6344 * ftrace_ops_get_func - get the function a trampoline should call
6345 * @ops: the ops to get the function for
6346 *
6347 * Normally the mcount trampoline will call the ops->func, but there
6348 * are times that it should not. For example, if the ops does not
6349 * have its own recursion protection, then it should call the
Chunyu Hu3a150df2017-02-22 08:29:26 +08006350 * ftrace_ops_assist_func() instead.
Steven Rostedt (Red Hat)87354052014-07-22 20:41:42 -04006351 *
6352 * Returns the function that the trampoline should call for @ops.
6353 */
6354ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
6355{
6356 /*
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05006357 * If the function does not handle recursion, needs to be RCU safe,
6358 * or does per cpu logic, then we need to call the assist handler.
Steven Rostedt (Red Hat)87354052014-07-22 20:41:42 -04006359 */
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05006360 if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
Peter Zijlstrab3a88802017-10-11 09:45:32 +02006361 ops->flags & FTRACE_OPS_FL_RCU)
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05006362 return ftrace_ops_assist_func;
Steven Rostedt (Red Hat)87354052014-07-22 20:41:42 -04006363
6364 return ops->func;
6365}
6366
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006367static void
6368ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
6369 struct task_struct *prev, struct task_struct *next)
Steven Rostedte32d8952008-12-04 00:26:41 -05006370{
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006371 struct trace_array *tr = data;
6372 struct trace_pid_list *pid_list;
6373
6374 pid_list = rcu_dereference_sched(tr->function_pids);
6375
6376 this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
6377 trace_ignore_this_task(pid_list, next));
6378}
6379
Namhyung Kim1e104862017-04-17 11:44:28 +09006380static void
6381ftrace_pid_follow_sched_process_fork(void *data,
6382 struct task_struct *self,
6383 struct task_struct *task)
6384{
6385 struct trace_pid_list *pid_list;
6386 struct trace_array *tr = data;
6387
6388 pid_list = rcu_dereference_sched(tr->function_pids);
6389 trace_filter_add_remove_task(pid_list, self, task);
6390}
6391
6392static void
6393ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
6394{
6395 struct trace_pid_list *pid_list;
6396 struct trace_array *tr = data;
6397
6398 pid_list = rcu_dereference_sched(tr->function_pids);
6399 trace_filter_add_remove_task(pid_list, NULL, task);
6400}
6401
6402void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
6403{
6404 if (enable) {
6405 register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
6406 tr);
6407 register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
6408 tr);
6409 } else {
6410 unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
6411 tr);
6412 unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
6413 tr);
6414 }
6415}
6416
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006417static void clear_ftrace_pids(struct trace_array *tr)
6418{
6419 struct trace_pid_list *pid_list;
Steven Rostedte32d8952008-12-04 00:26:41 -05006420 int cpu;
6421
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006422 pid_list = rcu_dereference_protected(tr->function_pids,
6423 lockdep_is_held(&ftrace_lock));
6424 if (!pid_list)
6425 return;
6426
6427 unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
6428
6429 for_each_possible_cpu(cpu)
6430 per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;
6431
6432 rcu_assign_pointer(tr->function_pids, NULL);
6433
6434 /* Wait till all users are no longer using pid filtering */
6435 synchronize_sched();
6436
6437 trace_free_pid_list(pid_list);
Steven Rostedte32d8952008-12-04 00:26:41 -05006438}
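/*
 * Same retire pattern as the graph hashes: the pid list pointer is
 * cleared with rcu_assign_pointer(), synchronize_sched() waits out the
 * sched-RCU readers in the sched_switch probe, and only then is the
 * list freed.
 */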
6439
Namhyung Kimd879d0b2017-04-17 11:44:27 +09006440void ftrace_clear_pids(struct trace_array *tr)
6441{
6442 mutex_lock(&ftrace_lock);
6443
6444 clear_ftrace_pids(tr);
6445
6446 mutex_unlock(&ftrace_lock);
6447}
6448
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006449static void ftrace_pid_reset(struct trace_array *tr)
Steven Rostedte32d8952008-12-04 00:26:41 -05006450{
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006451 mutex_lock(&ftrace_lock);
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006452 clear_ftrace_pids(tr);
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006453
6454 ftrace_update_pid_func();
Steven Rostedt (Red Hat)e1effa02014-08-05 17:19:38 -04006455 ftrace_startup_all(0);
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006456
6457 mutex_unlock(&ftrace_lock);
6458}
6459
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006460/* Greater than any max PID */
6461#define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1)

static void *fpid_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	mutex_lock(&ftrace_lock);
	rcu_read_lock_sched();

	pid_list = rcu_dereference_sched(tr->function_pids);

	if (!pid_list)
		return !(*pos) ? FTRACE_NO_PIDS : NULL;

	return trace_pid_start(pid_list, pos);
}

static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);

	if (v == FTRACE_NO_PIDS)
		return NULL;

	return trace_pid_next(pid_list, v, pos);
}

static void fpid_stop(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock_sched();
	mutex_unlock(&ftrace_lock);
}

static int fpid_show(struct seq_file *m, void *v)
{
	if (v == FTRACE_NO_PIDS) {
		seq_puts(m, "no pid\n");
		return 0;
	}

	return trace_pid_show(m, v);
}

static const struct seq_operations ftrace_pid_sops = {
	.start = fpid_start,
	.next = fpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};
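
/*
 * Note: the seq_file core drives the ops above as
 * start() -> show() -> next() -> show() ... -> stop(), so the
 * ftrace_lock and the sched RCU read lock taken in fpid_start()
 * stay held across the whole walk until fpid_stop() drops them.
 */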

static int
ftrace_pid_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_pid_reset(tr);

	ret = seq_open(file, &ftrace_pid_sops);
	if (ret < 0) {
		trace_array_put(tr);
	} else {
		m = file->private_data;
		/* copy tr over to seq ops */
		m->private = tr;
	}

	return ret;
}

static void ignore_task_cpu(void *data)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/*
	 * This function is called by on_each_cpu() while the
	 * ftrace_lock is held.
	 */
	pid_list = rcu_dereference_protected(tr->function_pids,
					     mutex_is_locked(&ftrace_lock));

	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
		       trace_ignore_this_task(pid_list, current));
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	struct trace_pid_list *filtered_pids = NULL;
	struct trace_pid_list *pid_list;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_lock);

	filtered_pids = rcu_dereference_protected(tr->function_pids,
					     lockdep_is_held(&ftrace_lock));

	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(tr->function_pids, pid_list);

	if (filtered_pids) {
		synchronize_sched();
		trace_free_pid_list(filtered_pids);
	} else if (pid_list) {
		/* Register a probe to set whether to ignore the tracing of a task */
		register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
	}

	/*
	 * Pid filtering is normally updated at task switch, but tasks
	 * that are already running must be updated here as well.
	 * Always do this, in case a pid was appended or removed.
	 */
	on_each_cpu(ignore_task_cpu, tr, 1);

	ftrace_update_pid_func();
	ftrace_startup_all(0);
 out:
	mutex_unlock(&ftrace_lock);

	if (ret > 0)
		*ppos += ret;

	return ret;
}

static int
ftrace_pid_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return seq_release(inode, file);
}

static const struct file_operations ftrace_pid_fops = {
	.open		= ftrace_pid_open,
	.write		= ftrace_pid_write,
	.read		= seq_read,
	.llseek		= tracing_lseek,
	.release	= ftrace_pid_release,
};

void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	trace_create_file("set_ftrace_pid", 0644, d_tracer,
			    tr, &ftrace_pid_fops);
}
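
/*
 * Example usage from user space (a sketch, not part of this file;
 * assumes tracefs is mounted at /sys/kernel/tracing and uses
 * made-up pids):
 *
 *	# echo 123 > /sys/kernel/tracing/set_ftrace_pid
 *	# echo 456 >> /sys/kernel/tracing/set_ftrace_pid
 *
 * Opening the file with O_TRUNC (">") calls ftrace_pid_reset() and
 * drops the old list first; appending (">>") adds to the existing
 * filtered pids via trace_pid_write().
 */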

void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
					 struct dentry *d_tracer)
{
	/* Only the top level directory has the dyn_tracefs and profile */
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));

	ftrace_init_dyn_tracefs(d_tracer);
	ftrace_profile_tracefs(d_tracer);
}

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: it takes no locks and unregisters
 * nothing, it simply disables all tracing on the spot.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	ftrace_trace_function = ftrace_stub;
}

/**
 * ftrace_is_dead - Test if ftrace is dead or not.
 */
int ftrace_is_dead(void)
{
	return ftrace_disabled;
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret = -1;

	ftrace_ops_init(ops);

	mutex_lock(&ftrace_lock);

	ret = ftrace_startup(ops, 0);

	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);
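
/*
 * Example usage (a minimal sketch, not part of this file; the
 * callback and ops names are made up, only the signature and the
 * flag come from the ftrace API):
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip,
 *					  struct ftrace_ops *op,
 *					  struct pt_regs *regs)
 *	{
 *		... called for every traced function, ip is its address ...
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_trace_func,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */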

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = ftrace_shutdown(ops, 0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_function);

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret = -ENODEV;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	last_ftrace_enabled = !!ftrace_enabled;

	if (ftrace_enabled) {

		/* we are starting ftrace again */
		if (rcu_dereference_protected(ftrace_ops_list,
			lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
			update_ftrace_function();

		ftrace_startup_sysctl();

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
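
/*
 * Example usage from user space (a sketch; the handler above backs
 * the kernel.ftrace_enabled sysctl):
 *
 *	# echo 0 > /proc/sys/kernel/ftrace_enabled	(send calls to ftrace_stub)
 *	# echo 1 > /proc/sys/kernel/ftrace_enabled	(start tracing again)
 */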

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static struct ftrace_ops graph_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
				  FTRACE_OPS_FL_INITIALIZED |
				  FTRACE_OPS_FL_PID |
				  FTRACE_OPS_FL_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
	/* trampoline_size is only needed for dynamically allocated tramps */
#endif
	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};

void ftrace_graph_sleep_time_control(bool enable)
{
	fgraph_sleep_time = enable;
}

void ftrace_graph_graph_time_control(bool enable)
{
	fgraph_graph_time = enable;
}

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] =
			kmalloc_array(FTRACE_RETFUNC_DEPTH,
				      sizeof(struct ftrace_ret_stack),
				      GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock(&tasklist_lock);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock(&tasklist_lock);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

static void
ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
			struct task_struct *prev, struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (fgraph_sleep_time)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
				       sizeof(struct ftrace_ret_stack *),
				       GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
{
	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
		return 0;
	return __ftrace_graph_entry(trace);
}

/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops is registered, the graph tracer must test each
 * function against the global ops, and not just trace any function
 * that any ftrace_ops has registered.
 */
static void update_function_graph_func(void)
{
	struct ftrace_ops *op;
	bool do_test = false;

	/*
	 * The graph and global ops share the same set of functions
	 * to test. If any other ops is on the list, then
	 * the graph tracing needs to test if it is the function
	 * it should call.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op != &global_ops && op != &graph_ops &&
		    op != &ftrace_list_end) {
			do_test = true;
			/* in double loop, break out with goto */
			goto out;
		}
	} while_for_each_ftrace_op(op);
 out:
	if (do_test)
		ftrace_graph_entry = ftrace_graph_entry_test;
	else
		ftrace_graph_entry = __ftrace_graph_entry;
}

static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};

int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = retfunc;

	/*
	 * Update the indirect function to the entryfunc, and the
	 * function that gets called to the entry_test first. Then
	 * call the update fgraph entry function to determine if
	 * the entryfunc should be called directly or not.
	 */
	__ftrace_graph_entry = entryfunc;
	ftrace_graph_entry = ftrace_graph_entry_test;
	update_function_graph_func();

	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
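
/*
 * Example usage (a minimal sketch, not part of this file; the
 * callback names are made up, only the signatures and the argument
 * order come from register_ftrace_graph() above):
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	... nonzero means: trace this function ...
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *		... called when the traced function returns ...
 *	}
 *
 *	register_ftrace_graph(my_return, my_entry);
 *	...
 *	unregister_ftrace_graph();
 */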

void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	__ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);

 out:
	mutex_unlock(&ftrace_lock);
}

static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->tracing_graph_pause, 0);
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}

/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	/*
	 * The idle task has no parent; it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack =
				kmalloc_array(FTRACE_RETFUNC_DEPTH,
					      sizeof(struct ftrace_ret_stack),
					      GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
					  sizeof(struct ftrace_ret_stack),
					  GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
#endif