// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>
#include <linux/memory.h>

#include <trace/events/sched.h>

#include <asm/sections.h>
#include <asm/setup.h>

#include "ftrace_internal.h"
#include "trace_output.h"
#include "trace_stat.h"
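
/*
 * These warn-and-kill wrappers evaluate the condition, and if it trips
 * the (possibly once-only) warning, they shut ftrace down via
 * ftrace_kill() so a detected inconsistency cannot corrupt the running
 * kernel any further. The condition's value is returned, so they can be
 * used directly inside an if ().
 */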
#define FTRACE_WARN_ON(cond)                    \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON(___r))              \
                        ftrace_kill();          \
                ___r;                           \
        })

#define FTRACE_WARN_ON_ONCE(cond)               \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON_ONCE(___r))         \
                        ftrace_kill();          \
                ___r;                           \
        })

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)  \
        .func_hash              = &opsname.local_hash,                  \
        .local_hash.regex_lock  = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#endif

enum {
        FTRACE_MODIFY_ENABLE_FL         = (1 << 0),
        FTRACE_MODIFY_MAY_SLEEP_FL      = (1 << 1),
};

struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
        INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;
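
/*
 * Returns true if this ops does per-PID filtering: it has the PID flag
 * set, is bound to a trace instance, and that instance currently has a
 * function_pids list to filter on.
 */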
static bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
        struct trace_array *tr;

        if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
                return false;

        tr = ops->private;

        return tr->function_pids != NULL;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

DEFINE_MUTEX(ftrace_lock);

struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
struct ftrace_ops global_ops;

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                                 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
        if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
                mutex_init(&ops->local_hash.regex_lock);
                ops->func_hash = &ops->local_hash;
                ops->flags |= FTRACE_OPS_FL_INITIALIZED;
        }
#endif
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct pt_regs *regs)
{
        struct trace_array *tr = op->private;

        if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
                return;

        op->saved_func(ip, parent_ip, op, regs);
}

static void ftrace_sync(struct work_struct *work)
{
        /*
         * This function is just a stub to implement a hard force
         * of synchronize_rcu(). This requires synchronizing
         * tasks even in userspace and idle.
         *
         * Yes, function tracing is rude.
         */
}

static void ftrace_sync_ipi(void *data)
{
        /* Probably not needed, but do it anyway */
        smp_rmb();
}

static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
        /*
         * If this is a dynamic, RCU, or per CPU ops, or we force list func,
         * then it needs to call the list anyway.
         */
        if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
            FTRACE_FORCE_LIST_FUNC)
                return ftrace_ops_list_func;

        return ftrace_ops_get_func(ops);
}
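
/*
 * Pick the callback that the architecture's mcount/fentry trampoline
 * should invoke: the stub when nothing is registered, the single
 * registered ops' function (possibly via ftrace_ops_get_list_func())
 * when exactly one ops is on the list, and the list iterator
 * ftrace_ops_list_func otherwise.
 */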
static void update_ftrace_function(void)
{
        ftrace_func_t func;

        /*
         * Prepare the ftrace_ops that the arch callback will use.
         * If there's only one ftrace_ops registered, the ftrace_ops_list
         * will point to the ops we want.
         */
        set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
                                                lockdep_is_held(&ftrace_lock));

        /* If there's no ftrace_ops registered, just call the stub function */
        if (set_function_trace_op == &ftrace_list_end) {
                func = ftrace_stub;

        /*
         * If we are at the end of the list and this ops is
         * recursion safe and not dynamic and the arch supports passing ops,
         * then have the mcount trampoline call the function directly.
         */
        } else if (rcu_dereference_protected(ftrace_ops_list->next,
                        lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
                func = ftrace_ops_get_list_func(ftrace_ops_list);

        } else {
                /* Just use the default ftrace_ops */
                set_function_trace_op = &ftrace_list_end;
                func = ftrace_ops_list_func;
        }

        update_function_graph_func();

        /* If there's no change, then do nothing more here */
        if (ftrace_trace_function == func)
                return;

        /*
         * If we are using the list function, it doesn't care
         * about the function_trace_ops.
         */
        if (func == ftrace_ops_list_func) {
                ftrace_trace_function = func;
                /*
                 * Don't even bother setting function_trace_ops,
                 * it would be racy to do so anyway.
                 */
                return;
        }

#ifndef CONFIG_DYNAMIC_FTRACE
        /*
         * For static tracing, we need to be a bit more careful.
         * The function change takes effect immediately. Thus,
         * we need to coordinate the setting of the function_trace_ops
         * with the setting of the ftrace_trace_function.
         *
         * Set the function to the list ops, which will call the
         * function we want, albeit indirectly, but it handles the
         * ftrace_ops and doesn't depend on function_trace_op.
         */
        ftrace_trace_function = ftrace_ops_list_func;
        /*
         * Make sure all CPUs see this. Yes this is slow, but static
         * tracing is slow and nasty to have enabled.
         */
        schedule_on_each_cpu(ftrace_sync);
        /* Now all cpus are using the list ops. */
        function_trace_op = set_function_trace_op;
        /* Make sure the function_trace_op is visible on all CPUs */
        smp_wmb();
        /* Nasty way to force a rmb on all cpus */
        smp_call_function(ftrace_sync_ipi, NULL, 1);
        /* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

        ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops __rcu **list,
                           struct ftrace_ops *ops)
{
        rcu_assign_pointer(ops->next, *list);

        /*
         * We are entering ops into the list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the list.
         */
        rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
                             struct ftrace_ops *ops)
{
        struct ftrace_ops **p;

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (rcu_dereference_protected(*list,
                        lockdep_is_held(&ftrace_lock)) == ops &&
            rcu_dereference_protected(ops->next,
                        lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
                *list = &ftrace_list_end;
                return 0;
        }

        for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops)
                return -1;

        *p = (*p)->next;
        return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);
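
/*
 * Add @ops to the list of callbacks called by the function tracer.
 * Callers normally go through the public register_ftrace_function()
 * wrapper (defined later in this file), which takes ftrace_lock around
 * this. A minimal sketch of a user (the names here are illustrative,
 * not from this file) looks roughly like:
 *
 *      static void my_callback(unsigned long ip, unsigned long parent_ip,
 *                              struct ftrace_ops *op, struct pt_regs *regs)
 *      {
 *              // called for every traced function
 *      }
 *      static struct ftrace_ops my_ops = { .func = my_callback };
 *      ...
 *      register_ftrace_function(&my_ops);
 */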
int __register_ftrace_function(struct ftrace_ops *ops)
{
        if (ops->flags & FTRACE_OPS_FL_DELETED)
                return -EINVAL;

        if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
                return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        /*
         * If the ftrace_ops specifies SAVE_REGS, then it only can be used
         * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
         * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
         */
        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
            !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
                return -EINVAL;

        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
                ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

        if (!core_kernel_data((unsigned long)ops))
                ops->flags |= FTRACE_OPS_FL_DYNAMIC;

        add_ftrace_ops(&ftrace_ops_list, ops);

        /* Always save the function, and reset at unregistering */
        ops->saved_func = ops->func;

        if (ftrace_pids_enabled(ops))
                ops->func = ftrace_pid_func;

        ftrace_update_trampoline(ops);

        if (ftrace_enabled)
                update_ftrace_function();

        return 0;
}

int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
                return -EBUSY;

        ret = remove_ftrace_ops(&ftrace_ops_list, ops);

        if (ret < 0)
                return ret;

        if (ftrace_enabled)
                update_ftrace_function();

        ops->func = ops->saved_func;

        return 0;
}

static void ftrace_update_pid_func(void)
{
        struct ftrace_ops *op;

        /* Only do something if we are tracing something */
        if (ftrace_trace_function == ftrace_stub)
                return;

        do_for_each_ftrace_op(op, ftrace_ops_list) {
                if (op->flags & FTRACE_OPS_FL_PID) {
                        op->func = ftrace_pids_enabled(op) ?
                                ftrace_pid_func : op->saved_func;
                        ftrace_update_trampoline(op);
                }
        } while_for_each_ftrace_op(op);

        update_ftrace_function();
}
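
/*
 * The function profiler: counts how often each function is hit (and,
 * with the graph tracer, how much time it consumes). It is enabled
 * through the tracefs file "function_profile_enabled", and the per-CPU
 * results are reported through the trace_stat interface below.
 */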
#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
        struct hlist_node               node;
        unsigned long                   ip;
        unsigned long                   counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        unsigned long long              time;
        unsigned long long              time_squared;
#endif
};

struct ftrace_profile_page {
        struct ftrace_profile_page      *next;
        unsigned long                   index;
        struct ftrace_profile           records[];
};

struct ftrace_profile_stat {
        atomic_t                        disabled;
        struct hlist_head               *hash;
        struct ftrace_profile_page      *pages;
        struct ftrace_profile_page      *start;
        struct tracer_stat              stat;
};

#define PROFILE_RECORDS_SIZE                                            \
        (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE                                       \
        (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
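
/*
 * tracer_stat iteration callbacks. Records live in page-sized arrays
 * (struct ftrace_profile_page), so the page containing a record can be
 * recovered by masking the record's address with PAGE_MASK, as
 * function_stat_next() does below.
 */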
static void *
function_stat_next(void *v, int idx)
{
        struct ftrace_profile *rec = v;
        struct ftrace_profile_page *pg;

        pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
        if (idx != 0)
                rec++;

        if ((void *)rec >= (void *)&pg->records[pg->index]) {
                pg = pg->next;
                if (!pg)
                        return NULL;
                rec = &pg->records[0];
                if (!rec->counter)
                        goto again;
        }

        return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
        struct ftrace_profile_stat *stat =
                container_of(trace, struct ftrace_profile_stat, stat);

        if (!stat || !stat->start)
                return NULL;

        return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
        struct ftrace_profile *a = p1;
        struct ftrace_profile *b = p2;

        if (a->time < b->time)
                return -1;
        if (a->time > b->time)
                return 1;
        else
                return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
        struct ftrace_profile *a = p1;
        struct ftrace_profile *b = p2;

        if (a->counter < b->counter)
                return -1;
        if (a->counter > b->counter)
                return 1;
        else
                return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_puts(m, "  Function                               "
                    "Hit    Time            Avg             s^2\n"
                    "  --------                               "
                    "---    ----            ---             ---\n");
#else
        seq_puts(m, "  Function                               Hit\n"
                    "  --------                               ---\n");
#endif
        return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
        struct ftrace_profile *rec = v;
        char str[KSYM_SYMBOL_LEN];
        int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        static struct trace_seq s;
        unsigned long long avg;
        unsigned long long stddev;
#endif
        mutex_lock(&ftrace_profile_lock);

        /* we raced with function_profile_reset() */
        if (unlikely(rec->counter == 0)) {
                ret = -EBUSY;
                goto out;
        }

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        avg = rec->time;
        do_div(avg, rec->counter);
        if (tracing_thresh && (avg < tracing_thresh))
                goto out;
#endif

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_puts(m, "    ");

        /* Sample standard deviation (s^2) */
        if (rec->counter <= 1)
                stddev = 0;
        else {
                /*
                 * Apply Welford's method:
                 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
                 */
                stddev = rec->counter * rec->time_squared -
                         rec->time * rec->time;

                /*
                 * Divide only 1000 for ns^2 -> us^2 conversion.
                 * trace_print_graph_duration will divide 1000 again.
                 */
                do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
        }

        trace_seq_init(&s);
        trace_print_graph_duration(rec->time, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(avg, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(stddev, &s);
        trace_print_seq(m, &s);
#endif
        seq_putc(m, '\n');
out:
        mutex_unlock(&ftrace_profile_lock);

        return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;

        pg = stat->pages = stat->start;

        while (pg) {
                memset(pg->records, 0, PROFILE_RECORDS_SIZE);
                pg->index = 0;
                pg = pg->next;
        }

        memset(stat->hash, 0,
               FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}
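
/*
 * Preallocate enough profile pages to hold one record per traced
 * function: the first page is allocated up front, then enough extra
 * pages are chained off of it to reach DIV_ROUND_UP(functions,
 * PROFILES_PER_PAGE) in total. On failure the whole chain is freed.
 */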
int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;
        int functions;
        int pages;
        int i;

        /* If we already allocated, do nothing */
        if (stat->pages)
                return 0;

        stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
        if (!stat->pages)
                return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
        functions = ftrace_update_tot_cnt;
#else
        /*
         * We do not know the number of functions that exist because
         * dynamic tracing is what counts them. With past experience
         * we have around 20K functions. That should be more than enough.
         * It is highly unlikely we will execute every function in
         * the kernel.
         */
        functions = 20000;
#endif

        pg = stat->start = stat->pages;

        pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

        for (i = 1; i < pages; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
                if (!pg->next)
                        goto out_free;
                pg = pg->next;
        }

        return 0;

 out_free:
        pg = stat->start;
        while (pg) {
                unsigned long tmp = (unsigned long)pg;

                pg = pg->next;
                free_page(tmp);
        }

        stat->pages = NULL;
        stat->start = NULL;

        return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
        struct ftrace_profile_stat *stat;
        int size;

        stat = &per_cpu(ftrace_profile_stats, cpu);

        if (stat->hash) {
                /* If the profile is already created, simply reset it */
                ftrace_profile_reset(stat);
                return 0;
        }

        /*
         * We are profiling all functions, but usually only a few thousand
         * functions are hit. We'll make a hash of 1024 items.
         */
        size = FTRACE_PROFILE_HASH_SIZE;

        stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);

        if (!stat->hash)
                return -ENOMEM;

        /* Preallocate the function profiling pages */
        if (ftrace_profile_pages_init(stat) < 0) {
                kfree(stat->hash);
                stat->hash = NULL;
                return -ENOMEM;
        }

        return 0;
}

static int ftrace_profile_init(void)
{
        int cpu;
        int ret = 0;

        for_each_possible_cpu(cpu) {
                ret = ftrace_profile_init_cpu(cpu);
                if (ret)
                        break;
        }

        return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec;
        struct hlist_head *hhd;
        unsigned long key;

        key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
        hhd = &stat->hash[key];

        if (hlist_empty(hhd))
                return NULL;

        hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
                if (rec->ip == ip)
                        return rec;
        }

        return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
                               struct ftrace_profile *rec)
{
        unsigned long key;

        key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
        hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec = NULL;

        /* prevent recursion (from NMIs) */
        if (atomic_inc_return(&stat->disabled) != 1)
                goto out;

        /*
         * Try to find the function again since an NMI
         * could have added it
         */
        rec = ftrace_find_profiled_func(stat, ip);
        if (rec)
                goto out;

        if (stat->pages->index == PROFILES_PER_PAGE) {
                if (!stat->pages->next)
                        goto out;
                stat->pages = stat->pages->next;
        }

        rec = &stat->pages->records[stat->pages->index++];
        rec->ip = ip;
        ftrace_add_profile(stat, rec);

 out:
        atomic_dec(&stat->disabled);

        return rec;
}
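
/*
 * The ftrace callback that does the actual profiling: with IRQs off,
 * look up (or lazily allocate) the record for @ip in this CPU's hash
 * and bump its hit counter. ftrace_profile_enabled is checked again
 * under local_irq_save() to close the race with the profiler being
 * disabled concurrently.
 */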
static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
                      struct ftrace_ops *ops, struct pt_regs *regs)
{
        struct ftrace_profile_stat *stat;
        struct ftrace_profile *rec;
        unsigned long flags;

        if (!ftrace_profile_enabled)
                return;

        local_irq_save(flags);

        stat = this_cpu_ptr(&ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        rec = ftrace_find_profiled_func(stat, ip);
        if (!rec) {
                rec = ftrace_profile_alloc(stat, ip);
                if (!rec)
                        goto out;
        }

        rec->counter++;
 out:
        local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static bool fgraph_graph_time = true;

void ftrace_graph_graph_time_control(bool enable)
{
        fgraph_graph_time = enable;
}

static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
        struct ftrace_ret_stack *ret_stack;

        function_profile_call(trace->func, 0, NULL, NULL);

        /* If function graph is shutting down, ret_stack can be NULL */
        if (!current->ret_stack)
                return 0;

        ret_stack = ftrace_graph_get_ret_stack(current, 0);
        if (ret_stack)
                ret_stack->subtime = 0;

        return 1;
}
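
/*
 * Called on function exit when graph profiling is active: accumulate
 * the call's duration into rec->time and rec->time_squared. When
 * fgraph_graph_time is false, time spent in children is subtracted out
 * via the subtime bookkeeping so only the function's own time is
 * recorded.
 */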
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
        struct ftrace_ret_stack *ret_stack;
        struct ftrace_profile_stat *stat;
        unsigned long long calltime;
        struct ftrace_profile *rec;
        unsigned long flags;

        local_irq_save(flags);
        stat = this_cpu_ptr(&ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        /* If the calltime was zero'd ignore it */
        if (!trace->calltime)
                goto out;

        calltime = trace->rettime - trace->calltime;

        if (!fgraph_graph_time) {

                /* Append this call time to the parent time to subtract */
                ret_stack = ftrace_graph_get_ret_stack(current, 1);
                if (ret_stack)
                        ret_stack->subtime += calltime;

                ret_stack = ftrace_graph_get_ret_stack(current, 0);
                if (ret_stack && ret_stack->subtime < calltime)
                        calltime -= ret_stack->subtime;
                else
                        calltime = 0;
        }

        rec = ftrace_find_profiled_func(stat, trace->func);
        if (rec) {
                rec->time += calltime;
                rec->time_squared += calltime * calltime;
        }

 out:
        local_irq_restore(flags);
}

static struct fgraph_ops fprofiler_ops = {
        .entryfunc = &profile_graph_entry,
        .retfunc = &profile_graph_return,
};

static int register_ftrace_profiler(void)
{
        return register_ftrace_graph(&fprofiler_ops);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_graph(&fprofiler_ops);
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
        .func           = function_profile_call,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
        INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
        return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
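
/*
 * Handle writes to the "function_profile_enabled" tracefs file: a
 * non-zero value allocates the per-CPU stats (if needed) and registers
 * the profiler, zero unregisters it again.
 */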
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        val = !!val;

        mutex_lock(&ftrace_profile_lock);
        if (ftrace_profile_enabled ^ val) {
                if (val) {
                        ret = ftrace_profile_init();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }

                        ret = register_ftrace_profiler();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }
                        ftrace_profile_enabled = 1;
                } else {
                        ftrace_profile_enabled = 0;
                        /*
                         * unregister_ftrace_profiler calls stop_machine
                         * so this acts like a synchronize_rcu().
                         */
                        unregister_ftrace_profiler();
                }
        }
 out:
        mutex_unlock(&ftrace_profile_lock);

        *ppos += cnt;

        return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        char buf[64];           /* big enough to hold a number */
        int r;

        r = sprintf(buf, "%u\n", ftrace_profile_enabled);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
        .open           = tracing_open_generic,
        .read           = ftrace_profile_read,
        .write          = ftrace_profile_write,
        .llseek         = default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
        .name           = "functions",
        .stat_start     = function_stat_start,
        .stat_next      = function_stat_next,
        .stat_cmp       = function_stat_cmp,
        .stat_headers   = function_stat_headers,
        .stat_show      = function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
        struct ftrace_profile_stat *stat;
        struct dentry *entry;
        char *name;
        int ret;
        int cpu;

        for_each_possible_cpu(cpu) {
                stat = &per_cpu(ftrace_profile_stats, cpu);

                name = kasprintf(GFP_KERNEL, "function%d", cpu);
                if (!name) {
                        /*
                         * The files created are permanent, if something happens
                         * we still do not free memory.
                         */
                        WARN(1,
                             "Could not allocate stat file for cpu %d\n",
                             cpu);
                        return;
                }
                stat->stat = function_stats;
                stat->stat.name = name;
                ret = register_stat_tracer(&stat->stat);
                if (ret) {
                        WARN(1,
                             "Could not register function stat for cpu %d\n",
                             cpu);
                        kfree(name);
                        return;
                }
        }

        entry = tracefs_create_file("function_profile_enabled", 0644,
                                    d_tracer, NULL, &ftrace_profile_fops);
        if (!entry)
                pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

struct ftrace_func_entry {
        struct hlist_node hlist;
        unsigned long ip;
};
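
/*
 * One instance per registered function probe: ties the probe_ops
 * callbacks and their private data to the trace instance (tr) they
 * were set on, with a ref count of how many functions use it.
 */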
struct ftrace_func_probe {
        struct ftrace_probe_ops        *probe_ops;
        struct ftrace_ops              ops;
        struct trace_array             *tr;
        struct list_head               list;
        void                           *data;
        int                            ref;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
        .buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)

struct ftrace_ops global_ops = {
        .func                           = ftrace_stub,
        .local_hash.notrace_hash        = EMPTY_HASH,
        .local_hash.filter_hash         = EMPTY_HASH,
        INIT_OPS_HASH(global_ops)
        .flags                          = FTRACE_OPS_FL_RECURSION_SAFE |
                                          FTRACE_OPS_FL_INITIALIZED |
                                          FTRACE_OPS_FL_PID,
};

/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
        struct ftrace_ops *op = NULL;

        /*
         * Some of the ops may be dynamically allocated,
         * they are freed after a synchronize_rcu().
         */
        preempt_disable_notrace();

        do_for_each_ftrace_op(op, ftrace_ops_list) {
                /*
                 * This is to check for dynamically allocated trampolines.
                 * Trampolines that are in kernel text will have
                 * core_kernel_text() return true.
                 */
                if (op->trampoline && op->trampoline_size)
                        if (addr >= op->trampoline &&
                            addr < op->trampoline + op->trampoline_size) {
                                preempt_enable_notrace();
                                return op;
                        }
        } while_for_each_ftrace_op(op);
        preempt_enable_notrace();

        return NULL;
}

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
        return ftrace_ops_trampoline(addr) != NULL;
}

struct ftrace_page {
        struct ftrace_page      *next;
        struct dyn_ftrace       *records;
        int                     index;
        int                     size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page      *ftrace_pages_start;
static struct ftrace_page      *ftrace_pages;
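
/*
 * Compute the bucket index for @ip. The empty hash has size_bits == 0
 * and only a single bucket, so everything hashes to key 0 there.
 */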
Steven Rostedt (VMware)2b0cce02017-02-01 12:19:33 -05001115static __always_inline unsigned long
1116ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
1117{
1118 if (hash->size_bits > 0)
1119 return hash_long(ip, hash->size_bits);
1120
1121 return 0;
1122}
1123
Steven Rostedt (VMware)2b2c2792017-02-01 15:37:07 -05001124/* Only use this function if ftrace_hash_empty() has already been tested */
1125static __always_inline struct ftrace_func_entry *
1126__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001127{
1128 unsigned long key;
1129 struct ftrace_func_entry *entry;
1130 struct hlist_head *hhd;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001131
Steven Rostedt (VMware)2b0cce02017-02-01 12:19:33 -05001132 key = ftrace_hash_key(hash, ip);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001133 hhd = &hash->buckets[key];
1134
Steven Rostedt1bb539c2013-05-28 14:38:43 -04001135 hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001136 if (entry->ip == ip)
1137 return entry;
1138 }
1139 return NULL;
1140}
1141
Steven Rostedt (VMware)2b2c2792017-02-01 15:37:07 -05001142/**
1143 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
1144 * @hash: The hash to look at
1145 * @ip: The instruction pointer to test
1146 *
1147 * Search a given @hash to see if a given instruction pointer (@ip)
1148 * exists in it.
1149 *
1150 * Returns the entry that holds the @ip if found. NULL otherwise.
1151 */
1152struct ftrace_func_entry *
1153ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1154{
1155 if (ftrace_hash_empty(hash))
1156 return NULL;
1157
1158 return __ftrace_lookup_ip(hash, ip);
1159}
1160
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001161static void __add_hash_entry(struct ftrace_hash *hash,
1162 struct ftrace_func_entry *entry)
1163{
1164 struct hlist_head *hhd;
1165 unsigned long key;
1166
Steven Rostedt (VMware)2b0cce02017-02-01 12:19:33 -05001167 key = ftrace_hash_key(hash, entry->ip);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001168 hhd = &hash->buckets[key];
1169 hlist_add_head(&entry->hlist, hhd);
1170 hash->count++;
1171}
1172
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001173static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1174{
1175 struct ftrace_func_entry *entry;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001176
1177 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1178 if (!entry)
1179 return -ENOMEM;
1180
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001181 entry->ip = ip;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001182 __add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del_rcu(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
{
	list_del(&ftrace_mod->list);
	kfree(ftrace_mod->module);
	kfree(ftrace_mod->func);
	kfree(ftrace_mod);
}

static void clear_ftrace_mod_list(struct list_head *head)
{
	struct ftrace_mod_load *p, *n;

	/* stack tracer isn't supported yet */
	if (!head)
		return;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(p, n, head, list)
		free_ftrace_mod(p);
	mutex_unlock(&ftrace_lock);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}

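/*
 * A minimal sketch of how the hash helpers above combine; the function
 * and its name are illustrative only, and it assumes the caller already
 * holds ftrace_lock.  It builds a one-entry hash, checks that the entry
 * can be found again, and then releases everything.
 */
static __maybe_unused int example_hash_roundtrip(unsigned long ip)
{
	struct ftrace_hash *hash;
	int ret;

	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
	if (!hash)
		return -ENOMEM;

	ret = add_hash_entry(hash, ip);
	if (!ret && !ftrace_lookup_ip(hash, ip))
		ret = -EINVAL;

	/* Walks every bucket, frees each entry, then the hash itself */
	free_ftrace_hash(hash);
	return ret;
}
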
static int ftrace_add_mod(struct trace_array *tr,
			  const char *func, const char *module,
			  int enable)
{
	struct ftrace_mod_load *ftrace_mod;
	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;

	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
	if (!ftrace_mod)
		return -ENOMEM;

	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
	ftrace_mod->enable = enable;

	if (!ftrace_mod->func || !ftrace_mod->module)
		goto out_free;

	list_add(&ftrace_mod->list, mod_head);

	return 0;

 out_free:
	free_ftrace_mod(ftrace_mod);

	return -ENOMEM;
}
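
/*
 * Sketch of the intended use, with made-up values: when a filter such as
 * "woken_wake_function:mod:kvm" names a module that is not loaded yet,
 * the parsed pieces are cached on the trace_array so the filter can be
 * applied when the module finally loads.
 */
static __maybe_unused int example_cache_mod_filter(struct trace_array *tr)
{
	/* enable == 1: treat it as a filter entry, not a notrace entry */
	return ftrace_add_mod(tr, "woken_wake_function", "kvm", 1);
}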

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	if (hash)
		new_hash->flags = hash->flags;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *new_hash;
	int size = src->count;
	int bits = 0;
	int i;

	/*
	 * If the new source is empty, just return the empty_hash.
	 */
	if (ftrace_hash_empty(src))
		return EMPTY_HASH;

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return NULL;

	new_hash->flags = src->flags;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

	return new_hash;
}

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}

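/*
 * A sketch of a typical caller, assuming it holds ftrace_lock: replace the
 * filter hash of @ops with the entries collected in @src.  The helper name
 * is made up; the real entry points are the ftrace_set_filter*() family.
 */
static __maybe_unused int example_install_filter(struct ftrace_ops *ops,
						 struct ftrace_hash *src)
{
	/* enable == 1 selects the filter hash rather than the notrace hash */
	return ftrace_hash_move(ops, 1, &ops->func_hash->filter_hash, src);
}
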
static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty filter
	 * hash is considered a match for any ip, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
	       (ftrace_hash_empty(hash->notrace_hash) ||
		!__ftrace_lookup_ip(hash->notrace_hash, ip));
}

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu().
 */
int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops in which the ftrace handler
	 * that wants regs may be called without them. We cannot allow
	 * that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}
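
/*
 * Sketch of how a dispatcher would use the test above; the function name
 * is illustrative.  With preemption disabled, ask whether @ops wants to
 * see the function at @ip before invoking ops->func.
 */
static __maybe_unused void example_maybe_call(struct ftrace_ops *ops,
					      unsigned long ip,
					      struct pt_regs *regs)
{
	preempt_disable_notrace();
	if (ftrace_ops_test(ops, ip, regs))
		ops->func(ip, 0UL, ops, regs);
	preempt_enable_notrace();
}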

/*
 * This is a double for loop. Do not use 'break' to break out of the loop;
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}

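/*
 * A small usage sketch (the function is illustrative): count how many
 * dyn_ftrace records are currently enabled.  Callers must hold
 * ftrace_lock, and must leave the loop with a goto, never a break.
 */
static __maybe_unused int example_count_enabled(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int enabled = 0;

	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & FTRACE_FL_ENABLED)
			enabled++;
	} while_for_each_ftrace_rec();

	return enabled;
}
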
static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

/**
 * ftrace_location_range - return the first address of a traced location
 *	if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *	to check.
 *
 * Returns rec->ip if the related ftrace location is at least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or call to the function tracer. It checks the ftrace
 * internal tables to determine if the address belongs or not.
 */
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			return rec->ip;
	}

	return 0;
}

/**
 * ftrace_location - return true if the ip given is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if the @ip given is a pointer to an ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
	return ftrace_location_range(ip, ip);
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contain an ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}
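
/*
 * Usage sketch (names made up): a code-patching client, e.g. something
 * kprobes-like, deciding whether a range it wants to overwrite collides
 * with an ftrace call site and must go through the ftrace API instead.
 */
static __maybe_unused bool example_range_is_patchable(void *start, void *end)
{
	return !ftrace_text_reserved(start, end);
}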

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}

static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = false;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inverted.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = true;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return false;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return false;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return false;
			rec->flags--;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * If the rec had TRAMP enabled, then it needs to
			 * be cleared. As TRAMP can only be enabled if
			 * there is only a single ops attached to it.
			 * In other words, always disable it on decrementing.
			 * In the future, we may set it if rec count is
			 * decremented to one, and the ops that is left
			 * has a trampoline.
			 */
			rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}
		count++;

		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
		update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE;

		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return update;
	} while_for_each_ftrace_rec();

	return update;
}

static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}

/*
 * Try to update the IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or no update is needed, -EBUSY if it detects a conflict of the flag
 * on an ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash have the following meanings:
 *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 *  - If the hash is EMPTY_HASH, it hits nothing
 *  - Anything else hits the recs which match the hash entries.
 */
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
					 struct ftrace_hash *old_hash,
					 struct ftrace_hash *new_hash)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec, *end = NULL;
	int in_old, in_new;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
		return 0;

	/*
	 * Since IPMODIFY is a very address-sensitive action, we do not
	 * allow ftrace_ops to set all functions to a new hash.
	 */
	if (!new_hash || !old_hash)
		return -EINVAL;

	/* Update rec->flags */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		/* We need to update only differences of filter_hash */
		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new) {
			/* New entries must ensure no others are using it */
			if (rec->flags & FTRACE_FL_IPMODIFY)
				goto rollback;
			rec->flags |= FTRACE_FL_IPMODIFY;
		} else /* Removed entry */
			rec->flags &= ~FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

	return 0;

rollback:
	end = rec;

	/* Roll back what we did above */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (rec == end)
			goto err_out;

		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new)
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		else
			rec->flags |= FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

err_out:
	return -EBUSY;
}

static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
}

/* Disabling always succeeds */
static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
}

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash)
{
	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(old_hash))
		old_hash = NULL;

	if (ftrace_hash_empty(new_hash))
		new_hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
}
static void print_ip_ins(const char *fmt, const unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);

enum ftrace_bug_type ftrace_bug_type;
const void *ftrace_expected;

static void print_bug_type(void)
{
	switch (ftrace_bug_type) {
	case FTRACE_BUG_UNKNOWN:
		break;
	case FTRACE_BUG_INIT:
		pr_info("Initializing ftrace call sites\n");
		break;
	case FTRACE_BUG_NOP:
		pr_info("Setting ftrace call site to NOP\n");
		break;
	case FTRACE_BUG_CALL:
		pr_info("Setting ftrace call site to call ftrace function\n");
		break;
	case FTRACE_BUG_UPDATE:
		pr_info("Updating ftrace call site to call a different ftrace function\n");
		break;
	}
}

/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @rec: The record that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, struct dyn_ftrace *rec)
{
	unsigned long ip = rec ? rec->ip : 0;

	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		pr_cont("\n");
		if (ftrace_expected) {
			print_ip_ins(" expected: ", ftrace_expected);
			pr_cont("\n");
		}
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
	print_bug_type();
	if (rec) {
		struct ftrace_ops *ops = NULL;

		pr_info("ftrace record flags: %lx\n", rec->flags);
		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
			rec->flags & FTRACE_FL_REGS ? " R" : " ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			ops = ftrace_find_tramp_ops_any(rec);
			if (ops) {
				do {
					pr_cont("\ttramp: %pS (%pS)",
						(void *)ops->trampoline,
						(void *)ops->func);
					ops = ftrace_find_tramp_ops_next(rec, ops);
				} while (ops);
			} else
				pr_cont("\ttramp: ERROR!");

		}
		ip = ftrace_get_addr_curr(rec);
		pr_cont("\n expected tramp: %lx\n", ip);
	}
}

static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
{
	unsigned long flag = 0UL;

	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

	if (rec->flags & FTRACE_FL_DISABLED)
		return FTRACE_UPDATE_IGNORE;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure it's disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (enable && ftrace_rec_count(rec))
		flag = FTRACE_FL_ENABLED;

	/*
	 * If enabling and the REGS flag does not match the REGS_EN, or
	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
	 * this record. Set flags to fail the compare against ENABLED.
	 */
	if (flag) {
		if (!(rec->flags & FTRACE_FL_REGS) !=
		    !(rec->flags & FTRACE_FL_REGS_EN))
			flag |= FTRACE_FL_REGS;

		if (!(rec->flags & FTRACE_FL_TRAMP) !=
		    !(rec->flags & FTRACE_FL_TRAMP_EN))
			flag |= FTRACE_FL_TRAMP;
	}

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return FTRACE_UPDATE_IGNORE;

	if (flag) {
		/* Save off if rec is being enabled (for return value) */
		flag ^= rec->flags & FTRACE_FL_ENABLED;

		if (update) {
			rec->flags |= FTRACE_FL_ENABLED;
			if (flag & FTRACE_FL_REGS) {
				if (rec->flags & FTRACE_FL_REGS)
					rec->flags |= FTRACE_FL_REGS_EN;
				else
					rec->flags &= ~FTRACE_FL_REGS_EN;
			}
			if (flag & FTRACE_FL_TRAMP) {
				if (rec->flags & FTRACE_FL_TRAMP)
					rec->flags |= FTRACE_FL_TRAMP_EN;
				else
					rec->flags &= ~FTRACE_FL_TRAMP_EN;
			}
		}

		/*
		 * If this record is being updated from a nop, then
		 * return UPDATE_MAKE_CALL.
		 * Otherwise,
		 * return UPDATE_MODIFY_CALL to tell the caller to convert
		 * from the save-regs to the non-save-regs function or
		 * vice versa, or from a trampoline call.
		 */
		if (flag & FTRACE_FL_ENABLED) {
			ftrace_bug_type = FTRACE_BUG_CALL;
			return FTRACE_UPDATE_MAKE_CALL;
		}

		ftrace_bug_type = FTRACE_BUG_UPDATE;
		return FTRACE_UPDATE_MODIFY_CALL;
	}

	if (update) {
		/* If there are no more users, clear all flags */
		if (!ftrace_rec_count(rec))
			rec->flags = 0;
		else
			/*
			 * Just disable the record, but keep the ops TRAMP
			 * and REGS states. The _EN flags must be disabled though.
			 */
			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
					FTRACE_FL_REGS_EN);
	}

	ftrace_bug_type = FTRACE_BUG_NOP;
	return FTRACE_UPDATE_MAKE_NOP;
}

/**
 * ftrace_update_record - set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to 1 if the record is tracing, zero to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 */
int ftrace_update_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 1);
}

/**
 * ftrace_test_record - check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to 1 to check if enabled, 0 if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 */
int ftrace_test_record(struct dyn_ftrace *rec, int enable)
{
	return ftrace_check_record(rec, enable, 0);
}
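
/*
 * Sketch of the pattern arch code follows with the two helpers above
 * (the function name is illustrative): peek with ftrace_test_record() to
 * size up the change, then commit it with ftrace_update_record().
 */
static __maybe_unused int example_arch_update_site(struct dyn_ftrace *rec,
						   int enable)
{
	/* Non-committal query first: would this record change at all? */
	if (ftrace_test_record(rec, enable) == FTRACE_UPDATE_IGNORE)
		return 0;

	/* Commit the state change; the return says what patch to apply */
	switch (ftrace_update_record(rec, enable)) {
	case FTRACE_UPDATE_MAKE_CALL:
	case FTRACE_UPDATE_MODIFY_CALL:
	case FTRACE_UPDATE_MAKE_NOP:
		/* arch-specific text patching would go here */
		return 0;
	default:
		return 0;
	}
}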

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
			   struct ftrace_ops *op)
{
	unsigned long ip = rec->ip;

	while_for_each_ftrace_op(op) {

		if (!op->trampoline)
			continue;

		if (hash_contains_ip(ip, op->func_hash))
			return op;
	}

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	/*
	 * Need to check removed ops first.
	 * If they are being removed, and this rec has a tramp,
	 * and this rec is in the ops list, then it would be the
	 * one with the tramp.
	 */
	if (removed_ops) {
		if (hash_contains_ip(ip, &removed_ops->old_hash))
			return removed_ops;
	}

	/*
	 * Need to find the current trampoline for a rec.
	 * Now, a trampoline is only attached to a rec if there
	 * was a single 'ops' attached to it. But this can be called
	 * when we are adding another op to the rec or removing the
	 * current one. Thus, if the op is being added, we can
	 * ignore it because it hasn't attached itself to the rec
	 * yet.
	 *
	 * If an ops is being modified (hooking to different functions)
	 * then we don't care about the new functions that are being
	 * added, just the old ones (that are probably being removed).
	 *
	 * If we are adding an ops to a function that already is using
	 * a trampoline, it needs to be removed (trampolines are only
	 * for single ops connected), then an ops that is not being
	 * modified also needs to be checked.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {

		if (!op->trampoline)
			continue;

		/*
		 * If the ops is being added, it hasn't gotten to
		 * the point to be removed from this tree yet.
		 */
		if (op->flags & FTRACE_OPS_FL_ADDING)
			continue;

		/*
		 * If the ops is being modified and is in the old
		 * hash, then it is probably being removed from this
		 * function.
		 */
		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, &op->old_hash))
			return op;
		/*
		 * If the ops is not being added or modified, and it's
		 * in its normal filter hash, then this must be the one
		 * we want!
		 */
		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
		    hash_contains_ip(ip, op->func_hash))
			return op;

	} while_for_each_ftrace_op(op);

	return NULL;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
{
	struct ftrace_ops *op;
	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* pass rec in as regs to have non-NULL val */
		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);

	return NULL;
}

/**
 * ftrace_get_addr_new - Get the call address to set to
 * @rec:  The ftrace record descriptor
 *
 * If the record has the FTRACE_FL_REGS set, that means that it
 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 * is not set, then it wants to convert to the normal callback.
 *
 * Returns the address of the trampoline to set to
 */
2306{
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002307 struct ftrace_ops *ops;
2308
2309 /* Trampolines take precedence over regs */
2310 if (rec->flags & FTRACE_FL_TRAMP) {
2311 ops = ftrace_find_tramp_ops_new(rec);
2312 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
Steven Rostedt (Red Hat)bce0b6c2014-08-20 23:57:04 -04002313 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2314 (void *)rec->ip, (void *)rec->ip, rec->flags);
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002315 /* Ftrace is shutting down, return anything */
2316 return (unsigned long)FTRACE_ADDR;
2317 }
2318 return ops->trampoline;
2319 }
2320
Steven Rostedt (Red Hat)7413af12014-05-06 21:34:14 -04002321 if (rec->flags & FTRACE_FL_REGS)
2322 return (unsigned long)FTRACE_REGS_ADDR;
2323 else
2324 return (unsigned long)FTRACE_ADDR;
2325}
2326
2327/**
2328 * ftrace_get_addr_curr - Get the call address that is already there
2329 * @rec: The ftrace record descriptor
2330 *
2331 * The FTRACE_FL_REGS_EN is set when the record already points to
2332 * a function that saves all the regs. Basically the '_EN' version
2333 * represents the current state of the function.
2334 *
2335 * Returns the address of the trampoline that is currently being called
2336 */
2337unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2338{
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002339 struct ftrace_ops *ops;
2340
2341 /* Trampolines take precedence over regs */
2342 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2343 ops = ftrace_find_tramp_ops_curr(rec);
2344 if (FTRACE_WARN_ON(!ops)) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07002345 pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2346 (void *)rec->ip, (void *)rec->ip);
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002347 /* Ftrace is shutting down, return anything */
2348 return (unsigned long)FTRACE_ADDR;
2349 }
2350 return ops->trampoline;
2351 }
2352
Steven Rostedt (Red Hat)7413af12014-05-06 21:34:14 -04002353 if (rec->flags & FTRACE_FL_REGS_EN)
2354 return (unsigned long)FTRACE_REGS_ADDR;
2355 else
2356 return (unsigned long)FTRACE_ADDR;
2357}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_old_addr;
	unsigned long ftrace_addr;
	int ret;

	ftrace_addr = ftrace_get_addr_new(rec);

	/* This needs to be done before we call ftrace_update_record */
	ftrace_old_addr = ftrace_get_addr_curr(rec);

	ret = ftrace_update_record(rec, enable);

	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		ftrace_bug_type = FTRACE_BUG_CALL;
		return ftrace_make_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		ftrace_bug_type = FTRACE_BUG_NOP;
		return ftrace_make_nop(NULL, rec, ftrace_old_addr);

	case FTRACE_UPDATE_MODIFY_CALL:
		ftrace_bug_type = FTRACE_BUG_UPDATE;
		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
	}

	return -1; /* unknown ftrace bug */
}

void __weak ftrace_replace_code(int mod_flags)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
	int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
	int failed;

	if (unlikely(ftrace_disabled))
		return;

	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			ftrace_bug(failed, rec);
			/* Stop processing */
			return;
		}
		if (schedulable)
			cond_resched();
	} while_for_each_ftrace_rec();
}

struct ftrace_rec_iter {
	struct ftrace_page *pg;
	int index;
};

/**
 * ftrace_rec_iter_start - start up iterating over traced functions
 *
 * Returns an iterator handle that is used to iterate over all
 * the records that represent address locations where functions
 * are traced.
 *
 * May return NULL if no records are available.
 */
struct ftrace_rec_iter *ftrace_rec_iter_start(void)
{
	/*
	 * We only use a single iterator.
	 * Protected by the ftrace_lock mutex.
	 */
	static struct ftrace_rec_iter ftrace_rec_iter;
	struct ftrace_rec_iter *iter = &ftrace_rec_iter;

	iter->pg = ftrace_pages_start;
	iter->index = 0;

	/* Could have empty pages */
	while (iter->pg && !iter->pg->index)
		iter->pg = iter->pg->next;

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_next - get the next record to process.
 * @iter: The handle to the iterator.
 *
 * Returns the next iterator after the given iterator @iter.
 */
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
{
	iter->index++;

	if (iter->index >= iter->pg->index) {
		iter->pg = iter->pg->next;
		iter->index = 0;

		/* Could have empty pages */
		while (iter->pg && !iter->pg->index)
			iter->pg = iter->pg->next;
	}

	if (!iter->pg)
		return NULL;

	return iter;
}

/**
 * ftrace_rec_iter_record - get the record at the iterator location
 * @iter: The current iterator location
 *
 * Returns the record that the current @iter is at.
 */
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
{
	return &iter->pg->records[iter->index];
}

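/*
 * Usage sketch for the iterator API above (the function is illustrative;
 * arch code uses this pattern when initializing call sites at boot):
 */
static __maybe_unused void example_walk_records(void)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;

	for (iter = ftrace_rec_iter_start(); iter;
	     iter = ftrace_rec_iter_next(iter)) {
		rec = ftrace_rec_iter_record(iter);
		/* inspect or patch the site at rec->ip here */
	}
}
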
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return 0;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug_type = FTRACE_BUG_INIT;
		ftrace_bug(ret, rec);
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

void ftrace_modify_all_code(int command)
{
	int update = command & FTRACE_UPDATE_TRACE_FUNC;
	int mod_flags = 0;
	int err = 0;

	if (command & FTRACE_MAY_SLEEP)
		mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;

	/*
	 * If the ftrace_caller calls an ftrace_ops func directly,
	 * we need to make sure that it only traces functions it
	 * expects to trace. When doing the switch of functions,
	 * we need to update to the ftrace_ops_list_func first
	 * before the transition between old and new calls is set,
	 * as the ftrace_ops_list_func will check the ops hashes
	 * to make sure the ops have the right functions traced.
	 */
	if (update) {
		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(mod_flags);

	if (update && ftrace_trace_function != ftrace_ops_list_func) {
		function_trace_op = set_function_trace_op;
		smp_wmb();
		/* If irqs are disabled, we are in stop machine */
		if (!irqs_disabled())
			smp_call_function(ftrace_sync_ipi, NULL, 1);
		err = ftrace_update_ftrace_func(ftrace_trace_function);
		if (FTRACE_WARN_ON(err))
			return;
	}

	if (command & FTRACE_START_FUNC_RET)
		err = ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		err = ftrace_disable_ftrace_graph_caller();
	FTRACE_WARN_ON(err);
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}

/**
 * ftrace_run_stop_machine - go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to fall back to the stop machine method, then
 * it can call this function.
 */
void ftrace_run_stop_machine(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

/**
 * arch_ftrace_update_code - modify the code to trace or not trace
 * @command: The command that needs to be done
 *
 * Archs can override this function if they do not need to
 * run stop_machine() to modify code.
 */
void __weak arch_ftrace_update_code(int command)
{
	ftrace_run_stop_machine(command);
}
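
/*
 * Sketch of what an arch override might look like, modeled loosely on
 * arches that can patch text without stop_machine(); the function below
 * is illustrative and deliberately not named arch_ftrace_update_code.
 */
static __maybe_unused void example_arch_update_code(int command)
{
	/* Text poking here is assumed safe without stopping the machine */
	ftrace_modify_all_code(command);
}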

static void ftrace_run_update_code(int command)
{
	int ret;

	mutex_lock(&text_mutex);

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		goto out_unlock;

	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do whatever they want as long as it
	 * is safe. stop_machine() is the safest, but it also
	 * produces the most overhead.
	 */
	arch_ftrace_update_code(command);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);

out_unlock:
	mutex_unlock(&text_mutex);
}

static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
				   struct ftrace_ops_hash *old_hash)
{
	ops->flags |= FTRACE_OPS_FL_MODIFYING;
	ops->old_hash.filter_hash = old_hash->filter_hash;
	ops->old_hash.notrace_hash = old_hash->notrace_hash;
	ftrace_run_update_code(command);
	ops->old_hash.filter_hash = NULL;
	ops->old_hash.notrace_hash = NULL;
	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
}

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup_all(int command)
{
	update_all_ops = true;
	ftrace_startup_enable(command);
	update_all_ops = false;
}

int ftrace_startup(struct ftrace_ops *ops, int command)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ret = __register_ftrace_function(ops);
	if (ret)
		return ret;

	ftrace_start_up++;

Steven Rostedt (Red Hat)e1effa02014-08-05 17:19:38 -04002688 /*
2689 * Note that ftrace probes use this to start up
2690 * and modify functions it will probe. But we still
2691 * set the ADDING flag for modification, as probes
2692 * do not have trampolines. If they add them in the
2693 * future, then the probes will need to distinguish
2694 * between adding and updating probes.
2695 */
2696 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
Steven Rostedt (Red Hat)66209a52014-05-06 21:57:49 -04002697
Masami Hiramatsuf8b8be82014-11-21 05:25:16 -05002698 ret = ftrace_hash_ipmodify_enable(ops);
2699 if (ret < 0) {
2700 /* Rollback registration process */
2701 __unregister_ftrace_function(ops);
2702 ftrace_start_up--;
2703 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2704 return ret;
2705 }
2706
Jiri Olsa7f50d062016-03-16 15:34:33 +01002707 if (ftrace_hash_rec_enable(ops, 1))
2708 command |= FTRACE_UPDATE_CALLS;
Steven Rostedted926f92011-05-03 13:25:24 -04002709
Steven Rostedtdf4fc312008-11-26 00:16:23 -05002710 ftrace_startup_enable(command);
Steven Rostedta1cd6172011-05-23 15:24:25 -04002711
Steven Rostedt (Red Hat)e1effa02014-08-05 17:19:38 -04002712 ops->flags &= ~FTRACE_OPS_FL_ADDING;
2713
Steven Rostedta1cd6172011-05-23 15:24:25 -04002714 return 0;
Steven Rostedt3d083392008-05-12 21:20:42 +02002715}
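/*
 * A caller never invokes ftrace_startup() directly; it is reached via
 * register_ftrace_function() under ftrace_lock. A minimal usage sketch
 * (the callback and ops names here are hypothetical):
 *
 *	static void my_func(unsigned long ip, unsigned long parent_ip,
 *			    struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		trace_printk("hit %pS\n", (void *)ip);
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */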
2716
Steven Rostedt (VMware)3306fc4a2018-11-15 12:32:38 -05002717int ftrace_shutdown(struct ftrace_ops *ops, int command)
Steven Rostedt3d083392008-05-12 21:20:42 +02002718{
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05002719 int ret;
Steven Rostedtb8489142011-05-04 09:27:52 -04002720
Steven Rostedt4eebcc82008-05-12 21:20:48 +02002721 if (unlikely(ftrace_disabled))
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05002722 return -ENODEV;
2723
2724 ret = __unregister_ftrace_function(ops);
2725 if (ret)
2726 return ret;
Steven Rostedt4eebcc82008-05-12 21:20:48 +02002727
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05002728 ftrace_start_up--;
Frederic Weisbecker9ea1a152009-06-20 06:52:21 +02002729 /*
2730 * Just warn in case of an imbalance; no need to kill ftrace, it's not
2731 * critical, but the ftrace_call callers may never be nopped again after
2732 * further ftrace uses.
2733 */
2734 WARN_ON_ONCE(ftrace_start_up < 0);
2735
Masami Hiramatsuf8b8be82014-11-21 05:25:16 -05002736 /* Disabling ipmodify never fails */
2737 ftrace_hash_ipmodify_disable(ops);
Jiri Olsa7f50d062016-03-16 15:34:33 +01002738
2739 if (ftrace_hash_rec_disable(ops, 1))
2740 command |= FTRACE_UPDATE_CALLS;
Steven Rostedtb8489142011-05-04 09:27:52 -04002741
Namhyung Kima737e6d2014-06-12 23:56:12 +09002742 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
Steven Rostedtb8489142011-05-04 09:27:52 -04002743
Steven Rostedtd61f82d2008-05-12 21:20:43 +02002744 if (saved_ftrace_func != ftrace_trace_function) {
2745 saved_ftrace_func = ftrace_trace_function;
2746 command |= FTRACE_UPDATE_TRACE_FUNC;
2747 }
2748
Steven Rostedt (Red Hat)a4c35ed22014-01-13 12:56:21 -05002749 if (!command || !ftrace_enabled) {
2750 /*
Steven Rostedt (VMware)edb096e2017-09-01 12:18:28 -04002751 * If these are dynamic or per_cpu ops, they still
2752 * need their data freed. Since, function tracing is
Steven Rostedt (Red Hat)a4c35ed22014-01-13 12:56:21 -05002753 * not currently active, we can just free them
2754 * without synchronizing all CPUs.
2755 */
Peter Zijlstrab3a88802017-10-11 09:45:32 +02002756 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
Steven Rostedt (VMware)edb096e2017-09-01 12:18:28 -04002757 goto free_ops;
2758
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05002759 return 0;
Steven Rostedt (Red Hat)a4c35ed22014-01-13 12:56:21 -05002760 }
Steven Rostedt3d083392008-05-12 21:20:42 +02002761
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002762 /*
2763 * If the ops uses a trampoline, then it needs to be
2764 * tested first on update.
2765 */
Steven Rostedt (Red Hat)e1effa02014-08-05 17:19:38 -04002766 ops->flags |= FTRACE_OPS_FL_REMOVING;
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002767 removed_ops = ops;
2768
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04002769 /* The trampoline logic checks the old hashes */
2770 ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2771 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2772
Steven Rostedtd61f82d2008-05-12 21:20:43 +02002773 ftrace_run_update_code(command);
Steven Rostedt (Red Hat)a4c35ed22014-01-13 12:56:21 -05002774
Steven Rostedt (Red Hat)84bde622014-09-12 14:21:13 -04002775 /*
2776 * If there's no more ops registered with ftrace, run a
2777 * sanity check to make sure all rec flags are cleared.
2778 */
Chunyan Zhangf86f4182017-06-07 16:12:51 +08002779 if (rcu_dereference_protected(ftrace_ops_list,
2780 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
Steven Rostedt (Red Hat)84bde622014-09-12 14:21:13 -04002781 struct ftrace_page *pg;
2782 struct dyn_ftrace *rec;
2783
2784 do_for_each_ftrace_rec(pg, rec) {
Alexei Starovoitov977c1f92016-11-07 15:14:20 -08002785 if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
Steven Rostedt (Red Hat)84bde622014-09-12 14:21:13 -04002786 pr_warn(" %pS flags:%lx\n",
2787 (void *)rec->ip, rec->flags);
2788 } while_for_each_ftrace_rec();
2789 }
2790
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04002791 ops->old_hash.filter_hash = NULL;
2792 ops->old_hash.notrace_hash = NULL;
2793
2794 removed_ops = NULL;
Steven Rostedt (Red Hat)e1effa02014-08-05 17:19:38 -04002795 ops->flags &= ~FTRACE_OPS_FL_REMOVING;
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002796
Steven Rostedt (Red Hat)a4c35ed22014-01-13 12:56:21 -05002797 /*
2798 * Dynamic ops may be freed, we must make sure that all
2799 * callers are done before leaving this function.
Steven Rostedt (Red Hat)ba27f2b2015-11-30 17:23:39 -05002800 * The same goes for freeing the per_cpu data of the per_cpu
Steven Rostedt (Red Hat)a4c35ed22014-01-13 12:56:21 -05002801 * ops.
Steven Rostedt (Red Hat)a4c35ed22014-01-13 12:56:21 -05002802 */
Peter Zijlstrab3a88802017-10-11 09:45:32 +02002803 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
Steven Rostedt (VMware)0598e4f2017-04-06 10:28:12 -04002804 /*
2805 * We need to force a hard sched synchronization.
2806 * This is because we use preempt_disable() to do RCU, but
2807 * the function tracers can be called where RCU is not watching
2808 * (like before user_exit()). We cannot rely on the RCU
2809 * infrastructure to do the synchronization, thus we must do it
2810 * ourselves.
2811 */
Steven Rostedt (Red Hat)a4c35ed22014-01-13 12:56:21 -05002812 schedule_on_each_cpu(ftrace_sync);
2813
Steven Rostedt (VMware)0598e4f2017-04-06 10:28:12 -04002814 /*
2815 * When the kernel is preemptible, tasks can be preempted
2816 * while on an ftrace trampoline. Just scheduling a task on
2817 * a CPU is not good enough to flush them. Calling
2818 * synchronize_rcu_tasks() will wait for those tasks to
2819 * execute and either schedule voluntarily or enter user space.
2820 */
2821 if (IS_ENABLED(CONFIG_PREEMPT))
2822 synchronize_rcu_tasks();
2823
Steven Rostedt (VMware)edb096e2017-09-01 12:18:28 -04002824 free_ops:
Steven Rostedt (Red Hat)12cce592014-07-03 15:48:16 -04002825 arch_ftrace_trampoline_free(ops);
Steven Rostedt (Red Hat)a4c35ed22014-01-13 12:56:21 -05002826 }
2827
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05002828 return 0;
Steven Rostedt3d083392008-05-12 21:20:42 +02002829}
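/*
 * The heavy synchronization above is what makes the common teardown
 * pattern for a dynamically allocated ops safe (sketch, hypothetical
 * names):
 *
 *	unregister_ftrace_function(my_dyn_ops);
 *	kfree(my_dyn_ops);
 *
 * Without schedule_on_each_cpu(ftrace_sync) and synchronize_rcu_tasks(),
 * a CPU could still be running my_dyn_ops->func, or be preempted on its
 * trampoline, when the memory is freed.
 */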
2830
Ingo Molnare309b412008-05-12 21:20:51 +02002831static void ftrace_startup_sysctl(void)
Steven Rostedtb0fc4942008-05-12 21:20:43 +02002832{
Pratyush Anand1619dc32015-03-06 23:58:06 +05302833 int command;
2834
Steven Rostedt4eebcc82008-05-12 21:20:48 +02002835 if (unlikely(ftrace_disabled))
2836 return;
2837
Steven Rostedtd61f82d2008-05-12 21:20:43 +02002838 /* Force update next time */
2839 saved_ftrace_func = NULL;
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05002840 /* ftrace_start_up is true if we want ftrace running */
Pratyush Anand1619dc32015-03-06 23:58:06 +05302841 if (ftrace_start_up) {
2842 command = FTRACE_UPDATE_CALLS;
2843 if (ftrace_graph_active)
2844 command |= FTRACE_START_FUNC_RET;
Steven Rostedt (Red Hat)524a3862015-03-06 19:55:13 -05002845 ftrace_startup_enable(command);
Pratyush Anand1619dc32015-03-06 23:58:06 +05302846 }
Steven Rostedtb0fc4942008-05-12 21:20:43 +02002847}
2848
Ingo Molnare309b412008-05-12 21:20:51 +02002849static void ftrace_shutdown_sysctl(void)
Steven Rostedtb0fc4942008-05-12 21:20:43 +02002850{
Pratyush Anand1619dc32015-03-06 23:58:06 +05302851 int command;
2852
Steven Rostedt4eebcc82008-05-12 21:20:48 +02002853 if (unlikely(ftrace_disabled))
2854 return;
2855
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05002856 /* ftrace_start_up is true if ftrace is running */
Pratyush Anand1619dc32015-03-06 23:58:06 +05302857 if (ftrace_start_up) {
2858 command = FTRACE_DISABLE_CALLS;
2859 if (ftrace_graph_active)
2860 command |= FTRACE_STOP_FUNC_RET;
2861 ftrace_run_update_code(command);
2862 }
Steven Rostedtb0fc4942008-05-12 21:20:43 +02002863}
2864
Thomas Gleixnera5a1d1c2016-12-21 20:32:01 +01002865static u64 ftrace_update_time;
Steven Rostedt3d083392008-05-12 21:20:42 +02002866unsigned long ftrace_update_tot_cnt;
2867
Steven Rostedt (Red Hat)8c4f3c32013-07-30 00:04:32 -04002868static inline int ops_traces_mod(struct ftrace_ops *ops)
Steven Rostedtf7bc8b62011-07-14 23:02:27 -04002869{
Steven Rostedt (Red Hat)8c4f3c32013-07-30 00:04:32 -04002870 /*
2871 * An empty filter_hash defaults to tracing the whole module.
2872 * But a notrace hash requires a test of individual module functions.
2873 */
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04002874 return ftrace_hash_empty(ops->func_hash->filter_hash) &&
2875 ftrace_hash_empty(ops->func_hash->notrace_hash);
Steven Rostedt (Red Hat)8c4f3c32013-07-30 00:04:32 -04002876}
Steven Rostedtf7bc8b62011-07-14 23:02:27 -04002877
Steven Rostedt (Red Hat)8c4f3c32013-07-30 00:04:32 -04002878/*
2879 * Check if the current ops references the record.
2880 *
2881 * If the ops traces all functions, then it was already accounted for.
2882 * If the ops does not trace the current record function, skip it.
2883 * If the ops ignores the function via notrace filter, skip it.
2884 */
2885static inline bool
2886ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2887{
2888 /* If ops isn't enabled, ignore it */
2889 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
Gustavo A. R. Silva44ec3ec2018-08-01 20:00:56 -05002890 return false;
Steven Rostedt (Red Hat)8c4f3c32013-07-30 00:04:32 -04002891
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05002892 /* If ops traces all then it includes this function */
Steven Rostedt (Red Hat)8c4f3c32013-07-30 00:04:32 -04002893 if (ops_traces_mod(ops))
Gustavo A. R. Silva44ec3ec2018-08-01 20:00:56 -05002894 return true;
Steven Rostedt (Red Hat)8c4f3c32013-07-30 00:04:32 -04002895
2896 /* The function must be in the filter */
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04002897 if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
Steven Rostedt (VMware)2b2c2792017-02-01 15:37:07 -05002898 !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
Gustavo A. R. Silva44ec3ec2018-08-01 20:00:56 -05002899 return false;
Steven Rostedt (Red Hat)8c4f3c32013-07-30 00:04:32 -04002900
2901 /* If in notrace hash, we ignore it too */
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04002902 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
Gustavo A. R. Silva44ec3ec2018-08-01 20:00:56 -05002903 return false;
Steven Rostedt (Red Hat)8c4f3c32013-07-30 00:04:32 -04002904
Gustavo A. R. Silva44ec3ec2018-08-01 20:00:56 -05002905 return true;
Steven Rostedt (Red Hat)8c4f3c32013-07-30 00:04:32 -04002906}
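/*
 * Worked example of the checks above, for an enabled ops and a record
 * whose ip is in a hypothetical function foo():
 *
 *	filter_hash empty, notrace_hash empty  -> true  (ops traces everything)
 *	filter_hash {foo}, notrace_hash empty  -> true
 *	filter_hash {bar}, notrace_hash empty  -> false (not in the filter)
 *	filter_hash empty, notrace_hash {foo}  -> false (explicitly notraced)
 */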
2907
Jiri Slaby1dc43cf2014-02-24 19:59:56 +01002908static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
Steven Rostedt3d083392008-05-12 21:20:42 +02002909{
Steven Rostedt85ae32a2011-12-16 16:30:31 -05002910 struct ftrace_page *pg;
Lai Jiangshane94142a2009-03-13 17:51:27 +08002911 struct dyn_ftrace *p;
Thomas Gleixnera5a1d1c2016-12-21 20:32:01 +01002912 u64 start, stop;
Jiri Slaby1dc43cf2014-02-24 19:59:56 +01002913 unsigned long update_cnt = 0;
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05002914 unsigned long rec_flags = 0;
Steven Rostedt85ae32a2011-12-16 16:30:31 -05002915 int i;
Steven Rostedtf7bc8b62011-07-14 23:02:27 -04002916
Ingo Molnar750ed1a2008-05-12 21:20:46 +02002917 start = ftrace_now(raw_smp_processor_id());
Steven Rostedt3d083392008-05-12 21:20:42 +02002918
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05002919 /*
2920 * When a module is loaded, this function is called to convert
2921 * the calls to mcount in its text to nops, and also to create
2922 * an entry in the ftrace data. Now, if ftrace is activated
2923 * after this call, but before the module sets its text to
2924 * read-only, the modification to enable ftrace can fail if
2925 * the text is made read-only while ftrace is converting the calls.
2926 * To prevent this, the module's records are set as disabled
2927 * and will be enabled after the call to set the module's text
2928 * to read-only.
2929 */
2930 if (mod)
2931 rec_flags |= FTRACE_FL_DISABLED;
2932
Jiri Slaby1dc43cf2014-02-24 19:59:56 +01002933 for (pg = new_pgs; pg; pg = pg->next) {
Abhishek Sagarf22f9a892008-06-21 23:50:29 +05302934
Steven Rostedt85ae32a2011-12-16 16:30:31 -05002935 for (i = 0; i < pg->index; i++) {
Steven Rostedt (Red Hat)8c4f3c32013-07-30 00:04:32 -04002936
Steven Rostedt85ae32a2011-12-16 16:30:31 -05002937 /* If something went wrong, bail without enabling anything */
2938 if (unlikely(ftrace_disabled))
2939 return -1;
Steven Rostedt3d083392008-05-12 21:20:42 +02002940
Steven Rostedt85ae32a2011-12-16 16:30:31 -05002941 p = &pg->records[i];
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05002942 p->flags = rec_flags;
Abhishek Sagar0eb96702008-06-01 21:47:30 +05302943
Steven Rostedt85ae32a2011-12-16 16:30:31 -05002944 /*
2945 * Do the initial record conversion from the mcount call
2946 * to NOP instructions.
2947 */
Vasily Gorbikcbdaeaf2019-06-05 13:11:58 +02002948 if (!__is_defined(CC_USING_NOP_MCOUNT) &&
2949 !ftrace_code_disable(mod, p))
Steven Rostedt85ae32a2011-12-16 16:30:31 -05002950 break;
Jiri Olsa5cb084b2009-10-13 16:33:53 -04002951
Jiri Slaby1dc43cf2014-02-24 19:59:56 +01002952 update_cnt++;
Jiri Olsa5cb084b2009-10-13 16:33:53 -04002953 }
Steven Rostedt3d083392008-05-12 21:20:42 +02002954 }
2955
Ingo Molnar750ed1a2008-05-12 21:20:46 +02002956 stop = ftrace_now(raw_smp_processor_id());
Steven Rostedt3d083392008-05-12 21:20:42 +02002957 ftrace_update_time = stop - start;
Jiri Slaby1dc43cf2014-02-24 19:59:56 +01002958 ftrace_update_tot_cnt += update_cnt;
Steven Rostedt3d083392008-05-12 21:20:42 +02002959
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +02002960 return 0;
2961}
2962
Steven Rostedta7900872011-12-16 16:23:44 -05002963static int ftrace_allocate_records(struct ftrace_page *pg, int count)
Steven Rostedt3c1720f2008-05-12 21:20:43 +02002964{
Steven Rostedta7900872011-12-16 16:23:44 -05002965 int order;
Steven Rostedt3c1720f2008-05-12 21:20:43 +02002966 int cnt;
Steven Rostedt3c1720f2008-05-12 21:20:43 +02002967
Steven Rostedta7900872011-12-16 16:23:44 -05002968 if (WARN_ON(!count))
2969 return -EINVAL;
2970
2971 order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
Steven Rostedt3c1720f2008-05-12 21:20:43 +02002972
2973 /*
Steven Rostedta7900872011-12-16 16:23:44 -05002974 * We want to fill as much as possible. No more than a page
2975 * may be empty.
Steven Rostedt3c1720f2008-05-12 21:20:43 +02002976 */
Steven Rostedta7900872011-12-16 16:23:44 -05002977 while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2978 order--;
Steven Rostedt3c1720f2008-05-12 21:20:43 +02002979
Steven Rostedta7900872011-12-16 16:23:44 -05002980 again:
2981 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2982
2983 if (!pg->records) {
2984 /* if we can't allocate this size, try something smaller */
2985 if (!order)
2986 return -ENOMEM;
2987 order >>= 1;
2988 goto again;
2989 }
2990
2991 cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2992 pg->size = cnt;
2993
2994 if (cnt > count)
2995 cnt = count;
2996
2997 return cnt;
2998}
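/*
 * Example of the sizing math above (illustrative numbers only: 4K
 * pages and a 16-byte struct dyn_ftrace, so ENTRIES_PER_PAGE == 256):
 * for count == 1000, DIV_ROUND_UP(1000, 256) == 4 pages and
 * get_count_order(4) == 2. At order 2 the block holds
 * (4096 << 2) / 16 == 1024 entries, which is less than 1000 + 256,
 * so the loop does not shrink the order; cnt is capped at count and
 * 1000 is returned, with pg->size left at 1024.
 */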
2999
3000static struct ftrace_page *
3001ftrace_allocate_pages(unsigned long num_to_init)
3002{
3003 struct ftrace_page *start_pg;
3004 struct ftrace_page *pg;
3005 int order;
3006 int cnt;
3007
3008 if (!num_to_init)
Hariprasad Kelam9efb85c2019-03-24 00:05:23 +05303009 return NULL;
Steven Rostedta7900872011-12-16 16:23:44 -05003010
3011 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3012 if (!pg)
3013 return NULL;
3014
3015 /*
3016 * Try to allocate as much as possible in one contiguous
3017 * location that fills in all of the space. We want to
3018 * waste as little space as possible.
3019 */
3020 for (;;) {
3021 cnt = ftrace_allocate_records(pg, num_to_init);
3022 if (cnt < 0)
3023 goto free_pages;
3024
3025 num_to_init -= cnt;
3026 if (!num_to_init)
3027 break;
3028
3029 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3030 if (!pg->next)
3031 goto free_pages;
3032
3033 pg = pg->next;
3034 }
3035
3036 return start_pg;
3037
3038 free_pages:
Namhyung Kim1f61be002014-06-11 17:06:53 +09003039 pg = start_pg;
3040 while (pg) {
Steven Rostedta7900872011-12-16 16:23:44 -05003041 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
3042 free_pages((unsigned long)pg->records, order);
3043 start_pg = pg->next;
3044 kfree(pg);
3045 pg = start_pg;
3046 }
3047 pr_info("ftrace: FAILED to allocate memory for functions\n");
3048 return NULL;
3049}
3050
Steven Rostedt5072c592008-05-12 21:20:43 +02003051#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3052
3053struct ftrace_iterator {
Steven Rostedt98c4fd02010-09-10 11:47:43 -04003054 loff_t pos;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003055 loff_t func_pos;
Steven Rostedt (VMware)5985ea82017-06-23 16:05:11 -04003056 loff_t mod_pos;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003057 struct ftrace_page *pg;
3058 struct dyn_ftrace *func;
3059 struct ftrace_func_probe *probe;
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003060 struct ftrace_func_entry *probe_entry;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003061 struct trace_parser parser;
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003062 struct ftrace_hash *hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003063 struct ftrace_ops *ops;
Steven Rostedt (VMware)5985ea82017-06-23 16:05:11 -04003064 struct trace_array *tr;
3065 struct list_head *mod_list;
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003066 int pidx;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003067 int idx;
3068 unsigned flags;
Steven Rostedt5072c592008-05-12 21:20:43 +02003069};
3070
Ingo Molnare309b412008-05-12 21:20:51 +02003071static void *
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003072t_probe_next(struct seq_file *m, loff_t *pos)
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003073{
3074 struct ftrace_iterator *iter = m->private;
Steven Rostedt (VMware)d2afd57a2017-04-20 11:31:35 -04003075 struct trace_array *tr = iter->ops->private;
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04003076 struct list_head *func_probes;
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003077 struct ftrace_hash *hash;
3078 struct list_head *next;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003079 struct hlist_node *hnd = NULL;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003080 struct hlist_head *hhd;
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003081 int size;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003082
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003083 (*pos)++;
Steven Rostedt98c4fd02010-09-10 11:47:43 -04003084 iter->pos = *pos;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003085
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04003086 if (!tr)
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003087 return NULL;
3088
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04003089 func_probes = &tr->func_probes;
3090 if (list_empty(func_probes))
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003091 return NULL;
3092
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003093 if (!iter->probe) {
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04003094 next = func_probes->next;
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04003095 iter->probe = list_entry(next, struct ftrace_func_probe, list);
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003096 }
3097
3098 if (iter->probe_entry)
3099 hnd = &iter->probe_entry->hlist;
3100
3101 hash = iter->probe->ops.func_hash->filter_hash;
3102 size = 1 << hash->size_bits;
3103
3104 retry:
3105 if (iter->pidx >= size) {
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04003106 if (iter->probe->list.next == func_probes)
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003107 return NULL;
3108 next = iter->probe->list.next;
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04003109 iter->probe = list_entry(next, struct ftrace_func_probe, list);
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003110 hash = iter->probe->ops.func_hash->filter_hash;
3111 size = 1 << hash->size_bits;
3112 iter->pidx = 0;
3113 }
3114
3115 hhd = &hash->buckets[iter->pidx];
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003116
3117 if (hlist_empty(hhd)) {
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003118 iter->pidx++;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003119 hnd = NULL;
3120 goto retry;
3121 }
3122
3123 if (!hnd)
3124 hnd = hhd->first;
3125 else {
3126 hnd = hnd->next;
3127 if (!hnd) {
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003128 iter->pidx++;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003129 goto retry;
3130 }
3131 }
3132
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003133 if (WARN_ON_ONCE(!hnd))
3134 return NULL;
3135
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003136 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003137
3138 return iter;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003139}
3140
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003141static void *t_probe_start(struct seq_file *m, loff_t *pos)
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003142{
3143 struct ftrace_iterator *iter = m->private;
3144 void *p = NULL;
Li Zefand82d6242009-06-24 09:54:54 +08003145 loff_t l;
3146
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003147 if (!(iter->flags & FTRACE_ITER_DO_PROBES))
Steven Rostedt69a30832011-12-19 15:21:16 -05003148 return NULL;
3149
Steven Rostedt (VMware)5985ea82017-06-23 16:05:11 -04003150 if (iter->mod_pos > *pos)
Steven Rostedt2bccfff2010-09-09 08:43:22 -04003151 return NULL;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003152
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003153 iter->probe = NULL;
3154 iter->probe_entry = NULL;
3155 iter->pidx = 0;
Steven Rostedt (VMware)5985ea82017-06-23 16:05:11 -04003156 for (l = 0; l <= (*pos - iter->mod_pos); ) {
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003157 p = t_probe_next(m, &l);
Li Zefand82d6242009-06-24 09:54:54 +08003158 if (!p)
3159 break;
3160 }
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003161 if (!p)
3162 return NULL;
3163
Steven Rostedt98c4fd02010-09-10 11:47:43 -04003164 /* Only set this if we have an item */
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003165 iter->flags |= FTRACE_ITER_PROBE;
Steven Rostedt98c4fd02010-09-10 11:47:43 -04003166
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003167 return iter;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003168}
3169
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003170static int
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003171t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003172{
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003173 struct ftrace_func_entry *probe_entry;
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04003174 struct ftrace_probe_ops *probe_ops;
3175 struct ftrace_func_probe *probe;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003176
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003177 probe = iter->probe;
3178 probe_entry = iter->probe_entry;
3179
3180 if (WARN_ON_ONCE(!probe || !probe_entry))
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003181 return -EIO;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003182
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04003183 probe_ops = probe->probe_ops;
Steven Rostedt809dcf22009-02-16 23:06:01 -05003184
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04003185 if (probe_ops->print)
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04003186 return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003187
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04003188 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
3189 (void *)probe_ops->func);
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003190
3191 return 0;
3192}
3193
3194static void *
Steven Rostedt (VMware)5985ea82017-06-23 16:05:11 -04003195t_mod_next(struct seq_file *m, loff_t *pos)
3196{
3197 struct ftrace_iterator *iter = m->private;
3198 struct trace_array *tr = iter->tr;
3199
3200 (*pos)++;
3201 iter->pos = *pos;
3202
3203 iter->mod_list = iter->mod_list->next;
3204
3205 if (iter->mod_list == &tr->mod_trace ||
3206 iter->mod_list == &tr->mod_notrace) {
3207 iter->flags &= ~FTRACE_ITER_MOD;
3208 return NULL;
3209 }
3210
3211 iter->mod_pos = *pos;
3212
3213 return iter;
3214}
3215
3216static void *t_mod_start(struct seq_file *m, loff_t *pos)
3217{
3218 struct ftrace_iterator *iter = m->private;
3219 void *p = NULL;
3220 loff_t l;
3221
3222 if (iter->func_pos > *pos)
3223 return NULL;
3224
3225 iter->mod_pos = iter->func_pos;
3226
3227 /* mods and probes are only available if tr is set */
3228 if (!iter->tr)
3229 return NULL;
3230
3231 for (l = 0; l <= (*pos - iter->func_pos); ) {
3232 p = t_mod_next(m, &l);
3233 if (!p)
3234 break;
3235 }
3236 if (!p) {
3237 iter->flags &= ~FTRACE_ITER_MOD;
3238 return t_probe_start(m, pos);
3239 }
3240
3241 /* Only set this if we have an item */
3242 iter->flags |= FTRACE_ITER_MOD;
3243
3244 return iter;
3245}
3246
3247static int
3248t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
3249{
3250 struct ftrace_mod_load *ftrace_mod;
3251 struct trace_array *tr = iter->tr;
3252
3253 if (WARN_ON_ONCE(!iter->mod_list) ||
3254 iter->mod_list == &tr->mod_trace ||
3255 iter->mod_list == &tr->mod_notrace)
3256 return -EIO;
3257
3258 ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
3259
3260 if (ftrace_mod->func)
3261 seq_printf(m, "%s", ftrace_mod->func);
3262 else
3263 seq_putc(m, '*');
3264
3265 seq_printf(m, ":mod:%s\n", ftrace_mod->module);
3266
3267 return 0;
3268}
3269
3270static void *
Steven Rostedt (VMware)5bd84622017-03-29 22:45:18 -04003271t_func_next(struct seq_file *m, loff_t *pos)
Steven Rostedt5072c592008-05-12 21:20:43 +02003272{
3273 struct ftrace_iterator *iter = m->private;
3274 struct dyn_ftrace *rec = NULL;
3275
3276 (*pos)++;
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05003277
Steven Rostedt5072c592008-05-12 21:20:43 +02003278 retry:
3279 if (iter->idx >= iter->pg->index) {
3280 if (iter->pg->next) {
3281 iter->pg = iter->pg->next;
3282 iter->idx = 0;
3283 goto retry;
3284 }
3285 } else {
3286 rec = &iter->pg->records[iter->idx++];
Steven Rostedt (VMware)c20489d2017-03-29 14:55:49 -04003287 if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3288 !ftrace_lookup_ip(iter->hash, rec->ip)) ||
Steven Rostedt647bcd02011-05-03 14:39:21 -04003289
3290 ((iter->flags & FTRACE_ITER_ENABLED) &&
Steven Rostedt (Red Hat)23ea9c42013-05-09 19:31:48 -04003291 !(rec->flags & FTRACE_FL_ENABLED))) {
Steven Rostedt647bcd02011-05-03 14:39:21 -04003292
Steven Rostedt5072c592008-05-12 21:20:43 +02003293 rec = NULL;
3294 goto retry;
3295 }
3296 }
3297
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003298 if (!rec)
Steven Rostedt (VMware)5bd84622017-03-29 22:45:18 -04003299 return NULL;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003300
Steven Rostedt (VMware)5bd84622017-03-29 22:45:18 -04003301 iter->pos = iter->func_pos = *pos;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003302 iter->func = rec;
3303
3304 return iter;
Steven Rostedt5072c592008-05-12 21:20:43 +02003305}
3306
Steven Rostedt (VMware)5bd84622017-03-29 22:45:18 -04003307static void *
3308t_next(struct seq_file *m, void *v, loff_t *pos)
3309{
3310 struct ftrace_iterator *iter = m->private;
Steven Rostedt (VMware)5985ea82017-06-23 16:05:11 -04003311 loff_t l = *pos; /* t_probe_start() must use original pos */
Steven Rostedt (VMware)5bd84622017-03-29 22:45:18 -04003312 void *ret;
3313
3314 if (unlikely(ftrace_disabled))
3315 return NULL;
3316
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003317 if (iter->flags & FTRACE_ITER_PROBE)
3318 return t_probe_next(m, pos);
Steven Rostedt (VMware)5bd84622017-03-29 22:45:18 -04003319
Steven Rostedt (VMware)5985ea82017-06-23 16:05:11 -04003320 if (iter->flags & FTRACE_ITER_MOD)
3321 return t_mod_next(m, pos);
3322
Steven Rostedt (VMware)5bd84622017-03-29 22:45:18 -04003323 if (iter->flags & FTRACE_ITER_PRINTALL) {
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003324 /* next must increment pos, and t_probe_start does not */
Steven Rostedt (VMware)5bd84622017-03-29 22:45:18 -04003325 (*pos)++;
Steven Rostedt (VMware)5985ea82017-06-23 16:05:11 -04003326 return t_mod_start(m, &l);
Steven Rostedt (VMware)5bd84622017-03-29 22:45:18 -04003327 }
3328
3329 ret = t_func_next(m, pos);
3330
3331 if (!ret)
Steven Rostedt (VMware)5985ea82017-06-23 16:05:11 -04003332 return t_mod_start(m, &l);
Steven Rostedt (VMware)5bd84622017-03-29 22:45:18 -04003333
3334 return ret;
3335}
3336
Steven Rostedt98c4fd02010-09-10 11:47:43 -04003337static void reset_iter_read(struct ftrace_iterator *iter)
3338{
3339 iter->pos = 0;
3340 iter->func_pos = 0;
Steven Rostedt (VMware)5985ea82017-06-23 16:05:11 -04003341 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
Steven Rostedt5072c592008-05-12 21:20:43 +02003342}
3343
3344static void *t_start(struct seq_file *m, loff_t *pos)
3345{
3346 struct ftrace_iterator *iter = m->private;
3347 void *p = NULL;
Li Zefan694ce0a2009-06-24 09:54:19 +08003348 loff_t l;
Steven Rostedt5072c592008-05-12 21:20:43 +02003349
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003350 mutex_lock(&ftrace_lock);
Steven Rostedt45a4a232011-04-21 23:16:46 -04003351
3352 if (unlikely(ftrace_disabled))
3353 return NULL;
3354
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05003355 /*
Steven Rostedt98c4fd02010-09-10 11:47:43 -04003356 * If an lseek was done, then reset and start from beginning.
3357 */
3358 if (*pos < iter->pos)
3359 reset_iter_read(iter);
3360
3361 /*
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05003362 * For set_ftrace_filter reading, if we have the filter
3363 * off, we can take a shortcut and just print out that all
3364 * functions are enabled.
3365 */
Steven Rostedt (VMware)c20489d2017-03-29 14:55:49 -04003366 if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3367 ftrace_hash_empty(iter->hash)) {
Steven Rostedt (VMware)43ff9262017-03-30 16:51:43 -04003368 iter->func_pos = 1; /* Account for the message */
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05003369 if (*pos > 0)
Steven Rostedt (VMware)5985ea82017-06-23 16:05:11 -04003370 return t_mod_start(m, pos);
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05003371 iter->flags |= FTRACE_ITER_PRINTALL;
Chris Wrightdf091622010-09-09 16:34:59 -07003372 /* reset in case of seek/pread */
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003373 iter->flags &= ~FTRACE_ITER_PROBE;
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05003374 return iter;
3375 }
3376
Steven Rostedt (VMware)5985ea82017-06-23 16:05:11 -04003377 if (iter->flags & FTRACE_ITER_MOD)
3378 return t_mod_start(m, pos);
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003379
Steven Rostedt98c4fd02010-09-10 11:47:43 -04003380 /*
3381 * Unfortunately, we need to restart at ftrace_pages_start
3382 * every time we let go of ftrace_lock. This is because
3383 * those pointers can change without the lock.
3384 */
Li Zefan694ce0a2009-06-24 09:54:19 +08003385 iter->pg = ftrace_pages_start;
3386 iter->idx = 0;
3387 for (l = 0; l <= *pos; ) {
Steven Rostedt (VMware)5bd84622017-03-29 22:45:18 -04003388 p = t_func_next(m, &l);
Li Zefan694ce0a2009-06-24 09:54:19 +08003389 if (!p)
3390 break;
Liming Wang50cdaf02008-11-28 12:13:21 +08003391 }
walimis5821e1b2008-11-15 15:19:06 +08003392
Steven Rostedt69a30832011-12-19 15:21:16 -05003393 if (!p)
Steven Rostedt (VMware)5985ea82017-06-23 16:05:11 -04003394 return t_mod_start(m, pos);
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003395
3396 return iter;
Steven Rostedt5072c592008-05-12 21:20:43 +02003397}
3398
3399static void t_stop(struct seq_file *m, void *p)
3400{
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003401 mutex_unlock(&ftrace_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02003402}
3403
Steven Rostedt (Red Hat)15d5b022014-07-03 14:51:36 -04003404void * __weak
3405arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3406{
3407 return NULL;
3408}
3409
3410static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3411 struct dyn_ftrace *rec)
3412{
3413 void *ptr;
3414
3415 ptr = arch_ftrace_trampoline_func(ops, rec);
3416 if (ptr)
3417 seq_printf(m, " ->%pS", ptr);
3418}
3419
Steven Rostedt5072c592008-05-12 21:20:43 +02003420static int t_show(struct seq_file *m, void *v)
3421{
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05003422 struct ftrace_iterator *iter = m->private;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003423 struct dyn_ftrace *rec;
Steven Rostedt5072c592008-05-12 21:20:43 +02003424
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003425 if (iter->flags & FTRACE_ITER_PROBE)
3426 return t_probe_show(m, iter);
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003427
Steven Rostedt (VMware)5985ea82017-06-23 16:05:11 -04003428 if (iter->flags & FTRACE_ITER_MOD)
3429 return t_mod_show(m, iter);
3430
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05003431 if (iter->flags & FTRACE_ITER_PRINTALL) {
Namhyung Kim8c006cf2014-06-13 16:24:06 +09003432 if (iter->flags & FTRACE_ITER_NOTRACE)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003433 seq_puts(m, "#### no functions disabled ####\n");
Namhyung Kim8c006cf2014-06-13 16:24:06 +09003434 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003435 seq_puts(m, "#### all functions enabled ####\n");
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05003436 return 0;
3437 }
3438
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003439 rec = iter->func;
3440
Steven Rostedt5072c592008-05-12 21:20:43 +02003441 if (!rec)
3442 return 0;
3443
Steven Rostedt647bcd02011-05-03 14:39:21 -04003444 seq_printf(m, "%ps", (void *)rec->ip);
Steven Rostedt (Red Hat)9674b2f2014-05-09 16:54:59 -04003445 if (iter->flags & FTRACE_ITER_ENABLED) {
Steven Rostedt (Red Hat)030f4e12015-12-01 12:24:45 -05003446 struct ftrace_ops *ops;
Steven Rostedt (Red Hat)15d5b022014-07-03 14:51:36 -04003447
Masami Hiramatsuf8b8be82014-11-21 05:25:16 -05003448 seq_printf(m, " (%ld)%s%s",
Steven Rostedt (Red Hat)0376bde2014-05-07 13:46:45 -04003449 ftrace_rec_count(rec),
Masami Hiramatsuf8b8be82014-11-21 05:25:16 -05003450 rec->flags & FTRACE_FL_REGS ? " R" : " ",
3451 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ");
Steven Rostedt (Red Hat)9674b2f2014-05-09 16:54:59 -04003452 if (rec->flags & FTRACE_FL_TRAMP_EN) {
Steven Rostedt (Red Hat)5fecaa02014-07-24 16:00:31 -04003453 ops = ftrace_find_tramp_ops_any(rec);
Steven Rostedt (Red Hat)39daa7b2015-11-25 15:12:38 -05003454 if (ops) {
3455 do {
3456 seq_printf(m, "\ttramp: %pS (%pS)",
3457 (void *)ops->trampoline,
3458 (void *)ops->func);
Steven Rostedt (Red Hat)030f4e12015-12-01 12:24:45 -05003459 add_trampoline_func(m, ops, rec);
Steven Rostedt (Red Hat)39daa7b2015-11-25 15:12:38 -05003460 ops = ftrace_find_tramp_ops_next(rec, ops);
3461 } while (ops);
3462 } else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003463 seq_puts(m, "\ttramp: ERROR!");
Steven Rostedt (Red Hat)030f4e12015-12-01 12:24:45 -05003464 } else {
3465 add_trampoline_func(m, NULL, rec);
Steven Rostedt (Red Hat)9674b2f2014-05-09 16:54:59 -04003466 }
3467 }
3468
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003469 seq_putc(m, '\n');
Steven Rostedt5072c592008-05-12 21:20:43 +02003470
3471 return 0;
3472}
3473
James Morris88e9d342009-09-22 16:43:43 -07003474static const struct seq_operations show_ftrace_seq_ops = {
Steven Rostedt5072c592008-05-12 21:20:43 +02003475 .start = t_start,
3476 .next = t_next,
3477 .stop = t_stop,
3478 .show = t_show,
3479};
3480
Ingo Molnare309b412008-05-12 21:20:51 +02003481static int
Steven Rostedt5072c592008-05-12 21:20:43 +02003482ftrace_avail_open(struct inode *inode, struct file *file)
3483{
3484 struct ftrace_iterator *iter;
Steven Rostedt5072c592008-05-12 21:20:43 +02003485
Steven Rostedt4eebcc82008-05-12 21:20:48 +02003486 if (unlikely(ftrace_disabled))
3487 return -ENODEV;
3488
Jiri Olsa50e18b92012-04-25 10:23:39 +02003489 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
Steven Rostedt (VMware)c1bc5912017-03-29 11:38:13 -04003490 if (!iter)
3491 return -ENOMEM;
Steven Rostedt5072c592008-05-12 21:20:43 +02003492
Steven Rostedt (VMware)c1bc5912017-03-29 11:38:13 -04003493 iter->pg = ftrace_pages_start;
3494 iter->ops = &global_ops;
3495
3496 return 0;
Steven Rostedt5072c592008-05-12 21:20:43 +02003497}
3498
Steven Rostedt647bcd02011-05-03 14:39:21 -04003499static int
3500ftrace_enabled_open(struct inode *inode, struct file *file)
3501{
3502 struct ftrace_iterator *iter;
Steven Rostedt647bcd02011-05-03 14:39:21 -04003503
Jiri Olsa50e18b92012-04-25 10:23:39 +02003504 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
Steven Rostedt (VMware)c1bc5912017-03-29 11:38:13 -04003505 if (!iter)
3506 return -ENOMEM;
Steven Rostedt647bcd02011-05-03 14:39:21 -04003507
Steven Rostedt (VMware)c1bc5912017-03-29 11:38:13 -04003508 iter->pg = ftrace_pages_start;
3509 iter->flags = FTRACE_ITER_ENABLED;
3510 iter->ops = &global_ops;
3511
3512 return 0;
Steven Rostedt647bcd02011-05-03 14:39:21 -04003513}
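/*
 * ftrace_avail_open() backs the tracefs file "available_filter_functions"
 * and ftrace_enabled_open() backs "enabled_functions". A typical look at
 * them from the shell:
 *
 *	cat /sys/kernel/tracing/available_filter_functions
 *	cat /sys/kernel/tracing/enabled_functions
 *
 * The first lists every record ftrace knows about; the second only the
 * records with FTRACE_FL_ENABLED set, for which t_show() also prints
 * ref counts and trampoline details.
 */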
3514
Steven Rostedtfc13cb02011-12-19 14:41:25 -05003515/**
3516 * ftrace_regex_open - initialize function tracer filter files
3517 * @ops: The ftrace_ops that hold the hash filters
3518 * @flag: The type of filter to process
3519 * @inode: The inode, usually passed in to your open routine
3520 * @file: The file, usually passed in to your open routine
3521 *
3522 * ftrace_regex_open() initializes the filter files for the
3523 * @ops. Depending on @flag it may process the filter hash or
3524 * the notrace hash of @ops. With this called from the open
3525 * routine, you can use ftrace_filter_write() for the write
3526 * routine if @flag has FTRACE_ITER_FILTER set, or
3527 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003528 * tracing_lseek() should be used as the lseek routine, and
Steven Rostedtfc13cb02011-12-19 14:41:25 -05003529 * release must call ftrace_regex_release().
3530 */
3531int
Steven Rostedtf45948e2011-05-02 12:29:25 -04003532ftrace_regex_open(struct ftrace_ops *ops, int flag,
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003533 struct inode *inode, struct file *file)
Steven Rostedt5072c592008-05-12 21:20:43 +02003534{
3535 struct ftrace_iterator *iter;
Steven Rostedtf45948e2011-05-02 12:29:25 -04003536 struct ftrace_hash *hash;
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04003537 struct list_head *mod_head;
3538 struct trace_array *tr = ops->private;
Steven Rostedt5072c592008-05-12 21:20:43 +02003539 int ret = 0;
3540
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09003541 ftrace_ops_init(ops);
3542
Steven Rostedt4eebcc82008-05-12 21:20:48 +02003543 if (unlikely(ftrace_disabled))
3544 return -ENODEV;
3545
Steven Rostedt5072c592008-05-12 21:20:43 +02003546 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3547 if (!iter)
3548 return -ENOMEM;
3549
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003550 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
3551 kfree(iter);
3552 return -ENOMEM;
3553 }
3554
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09003555 iter->ops = ops;
3556 iter->flags = flag;
Steven Rostedt (VMware)5985ea82017-06-23 16:05:11 -04003557 iter->tr = tr;
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09003558
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04003559 mutex_lock(&ops->func_hash->regex_lock);
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09003560
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04003561 if (flag & FTRACE_ITER_NOTRACE) {
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04003562 hash = ops->func_hash->notrace_hash;
Steven Rostedt (VMware)5985ea82017-06-23 16:05:11 -04003563 mod_head = tr ? &tr->mod_notrace : NULL;
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04003564 } else {
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04003565 hash = ops->func_hash->filter_hash;
Steven Rostedt (VMware)5985ea82017-06-23 16:05:11 -04003566 mod_head = tr ? &tr->mod_trace : NULL;
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04003567 }
Steven Rostedtf45948e2011-05-02 12:29:25 -04003568
Steven Rostedt (VMware)5985ea82017-06-23 16:05:11 -04003569 iter->mod_list = mod_head;
3570
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003571 if (file->f_mode & FMODE_WRITE) {
Namhyung Kimef2fbe12014-06-11 17:06:54 +09003572 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3573
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04003574 if (file->f_flags & O_TRUNC) {
Namhyung Kimef2fbe12014-06-11 17:06:54 +09003575 iter->hash = alloc_ftrace_hash(size_bits);
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04003576 clear_ftrace_mod_list(mod_head);
3577 } else {
Namhyung Kimef2fbe12014-06-11 17:06:54 +09003578 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04003579 }
Namhyung Kimef2fbe12014-06-11 17:06:54 +09003580
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003581 if (!iter->hash) {
3582 trace_parser_put(&iter->parser);
3583 kfree(iter);
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09003584 ret = -ENOMEM;
3585 goto out_unlock;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003586 }
Steven Rostedt (VMware)c20489d2017-03-29 14:55:49 -04003587 } else
3588 iter->hash = hash;
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003589
Steven Rostedt5072c592008-05-12 21:20:43 +02003590 if (file->f_mode & FMODE_READ) {
3591 iter->pg = ftrace_pages_start;
Steven Rostedt5072c592008-05-12 21:20:43 +02003592
3593 ret = seq_open(file, &show_ftrace_seq_ops);
3594 if (!ret) {
3595 struct seq_file *m = file->private_data;
3596 m->private = iter;
Li Zefan79fe2492009-09-22 13:54:28 +08003597 } else {
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003598 /* Failed */
3599 free_ftrace_hash(iter->hash);
Li Zefan79fe2492009-09-22 13:54:28 +08003600 trace_parser_put(&iter->parser);
Steven Rostedt5072c592008-05-12 21:20:43 +02003601 kfree(iter);
Li Zefan79fe2492009-09-22 13:54:28 +08003602 }
Steven Rostedt5072c592008-05-12 21:20:43 +02003603 } else
3604 file->private_data = iter;
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09003605
3606 out_unlock:
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04003607 mutex_unlock(&ops->func_hash->regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02003608
3609 return ret;
3610}
3611
Steven Rostedt41c52c02008-05-22 11:46:33 -04003612static int
3613ftrace_filter_open(struct inode *inode, struct file *file)
3614{
Steven Rostedt (Red Hat)e3b3e2e2013-11-11 23:07:14 -05003615 struct ftrace_ops *ops = inode->i_private;
3616
3617 return ftrace_regex_open(ops,
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04003618 FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
Steven Rostedt69a30832011-12-19 15:21:16 -05003619 inode, file);
Steven Rostedt41c52c02008-05-22 11:46:33 -04003620}
3621
3622static int
3623ftrace_notrace_open(struct inode *inode, struct file *file)
3624{
Steven Rostedt (Red Hat)e3b3e2e2013-11-11 23:07:14 -05003625 struct ftrace_ops *ops = inode->i_private;
3626
3627 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003628 inode, file);
Steven Rostedt41c52c02008-05-22 11:46:33 -04003629}
3630
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003631/* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */
3632struct ftrace_glob {
3633 char *search;
3634 unsigned len;
3635 int type;
3636};
3637
Thiago Jung Bauermann7132e2d2016-04-25 18:56:14 -03003638/*
3639 * If symbols in an architecture don't correspond exactly to the user-visible
3640 * name of what they represent, it is possible to define this function to
3641 * perform the necessary adjustments.
3642 */
3643char * __weak arch_ftrace_match_adjust(char *str, const char *search)
3644{
3645 return str;
3646}
3647
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003648static int ftrace_match(char *str, struct ftrace_glob *g)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003649{
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003650 int matched = 0;
Li Zefan751e9982010-01-14 10:53:02 +08003651 int slen;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003652
Thiago Jung Bauermann7132e2d2016-04-25 18:56:14 -03003653 str = arch_ftrace_match_adjust(str, g->search);
3654
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003655 switch (g->type) {
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003656 case MATCH_FULL:
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003657 if (strcmp(str, g->search) == 0)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003658 matched = 1;
3659 break;
3660 case MATCH_FRONT_ONLY:
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003661 if (strncmp(str, g->search, g->len) == 0)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003662 matched = 1;
3663 break;
3664 case MATCH_MIDDLE_ONLY:
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003665 if (strstr(str, g->search))
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003666 matched = 1;
3667 break;
3668 case MATCH_END_ONLY:
Li Zefan751e9982010-01-14 10:53:02 +08003669 slen = strlen(str);
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003670 if (slen >= g->len &&
3671 memcmp(str + slen - g->len, g->search, g->len) == 0)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003672 matched = 1;
3673 break;
Masami Hiramatsu60f1d5e2016-10-05 20:58:15 +09003674 case MATCH_GLOB:
3675 if (glob_match(g->search, str))
3676 matched = 1;
3677 break;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003678 }
3679
3680 return matched;
3681}
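/*
 * How filter_parse_regex() globs map onto the match types handled
 * above (illustrative):
 *
 *	"schedule"   MATCH_FULL          exact strcmp()
 *	"sched_*"    MATCH_FRONT_ONLY    strncmp() on the prefix
 *	"*_fair"     MATCH_END_ONLY      memcmp() on the tail
 *	"*sched*"    MATCH_MIDDLE_ONLY   strstr()
 *	"s?hed*le"   MATCH_GLOB          full glob_match()
 */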
3682
Steven Rostedtb448c4e2011-04-29 15:12:32 -04003683static int
Dmitry Safonovf0a3b152015-09-29 19:46:13 +03003684enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
Steven Rostedt996e87b2011-04-26 16:11:03 -04003685{
Steven Rostedtb448c4e2011-04-29 15:12:32 -04003686 struct ftrace_func_entry *entry;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04003687 int ret = 0;
3688
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003689 entry = ftrace_lookup_ip(hash, rec->ip);
Dmitry Safonovf0a3b152015-09-29 19:46:13 +03003690 if (clear_filter) {
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003691 /* Do nothing if it doesn't exist */
3692 if (!entry)
3693 return 0;
3694
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003695 free_hash_entry(hash, entry);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04003696 } else {
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003697 /* Do nothing if it exists */
3698 if (entry)
3699 return 0;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04003700
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003701 ret = add_hash_entry(hash, rec->ip);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04003702 }
3703 return ret;
Steven Rostedt996e87b2011-04-26 16:11:03 -04003704}
3705
Steven Rostedt64e7c442009-02-13 17:08:48 -05003706static int
Steven Rostedt (VMware)f79b3f32019-02-11 15:00:48 -05003707add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
3708 int clear_filter)
3709{
3710 long index = simple_strtoul(func_g->search, NULL, 0);
3711 struct ftrace_page *pg;
3712 struct dyn_ftrace *rec;
3713
3714 /* The index starts at 1 */
3715 if (--index < 0)
3716 return 0;
3717
3718 do_for_each_ftrace_rec(pg, rec) {
3719 if (pg->index <= index) {
3720 index -= pg->index;
3721 /* this is a double loop, break goes to the next page */
3722 break;
3723 }
3724 rec = &pg->records[index];
3725 enter_record(hash, rec, clear_filter);
3726 return 1;
3727 } while_for_each_ftrace_rec();
3728 return 0;
3729}
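/*
 * This backs the MATCH_INDEX shorthand: writing a bare number picks
 * the Nth line of available_filter_functions (1-based). Sketch:
 *
 *	echo 3 > /sys/kernel/tracing/set_ftrace_filter
 *
 * selects the third record, which is handy when a symbol name is
 * ambiguous or hard to type.
 */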
3730
3731static int
Dmitry Safonov0b507e12015-09-29 19:46:15 +03003732ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
3733 struct ftrace_glob *mod_g, int exclude_mod)
Steven Rostedt64e7c442009-02-13 17:08:48 -05003734{
3735 char str[KSYM_SYMBOL_LEN];
Steven Rostedtb9df92d2011-04-28 20:32:08 -04003736 char *modname;
Steven Rostedt64e7c442009-02-13 17:08:48 -05003737
Steven Rostedtb9df92d2011-04-28 20:32:08 -04003738 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3739
Dmitry Safonov0b507e12015-09-29 19:46:15 +03003740 if (mod_g) {
3741 int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
3742
3743 /* blank module name to match all modules */
3744 if (!mod_g->len) {
3745 /* blank module globbing: modname xor exclude_mod */
Steven Rostedt (VMware)77c0edd2017-05-03 11:41:44 -04003746 if (!exclude_mod != !modname)
Dmitry Safonov0b507e12015-09-29 19:46:15 +03003747 goto func_match;
3748 return 0;
3749 }
3750
Steven Rostedt (VMware)77c0edd2017-05-03 11:41:44 -04003751 /*
3752 * exclude_mod is set to trace everything but the given
3753 * module. If it is set and the module matches, then
3754 * return 0. If it is not set, and the module doesn't match
3755 * also return 0. Otherwise, check the function to see if
3756 * that matches.
3757 */
3758 if (!mod_matches == !exclude_mod)
Steven Rostedtb9df92d2011-04-28 20:32:08 -04003759 return 0;
Dmitry Safonov0b507e12015-09-29 19:46:15 +03003760func_match:
Steven Rostedtb9df92d2011-04-28 20:32:08 -04003761 /* blank search means to match all funcs in the mod */
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003762 if (!func_g->len)
Steven Rostedtb9df92d2011-04-28 20:32:08 -04003763 return 1;
3764 }
3765
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003766 return ftrace_match(str, func_g);
Steven Rostedt64e7c442009-02-13 17:08:48 -05003767}
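/*
 * Module-glob examples for the logic above (func_g "*" parses to a
 * blank function glob that matches everything):
 *
 *	func_g "*", mod_g "ext4", exclude_mod=0  -> every function in ext4
 *	func_g "*", mod_g "",     exclude_mod=0  -> every module function
 *	func_g "*", mod_g "ext4", exclude_mod=1  -> everything but ext4
 */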
3768
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003769static int
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003770match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003771{
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003772 struct ftrace_page *pg;
3773 struct dyn_ftrace *rec;
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003774 struct ftrace_glob func_g = { .type = MATCH_FULL };
Dmitry Safonov0b507e12015-09-29 19:46:15 +03003775 struct ftrace_glob mod_g = { .type = MATCH_FULL };
3776 struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
3777 int exclude_mod = 0;
Li Zefan311d16d2009-12-08 11:15:11 +08003778 int found = 0;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04003779 int ret;
Dan Carpenter2e028c42017-07-12 10:35:57 +03003780 int clear_filter = 0;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003781
Dmitry Safonov0b507e12015-09-29 19:46:15 +03003782 if (func) {
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003783 func_g.type = filter_parse_regex(func, len, &func_g.search,
3784 &clear_filter);
3785 func_g.len = strlen(func_g.search);
Steven Rostedtb9df92d2011-04-28 20:32:08 -04003786 }
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003787
Dmitry Safonov0b507e12015-09-29 19:46:15 +03003788 if (mod) {
3789 mod_g.type = filter_parse_regex(mod, strlen(mod),
3790 &mod_g.search, &exclude_mod);
3791 mod_g.len = strlen(mod_g.search);
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003792 }
3793
Steven Rostedt52baf112009-02-14 01:15:39 -05003794 mutex_lock(&ftrace_lock);
Steven Rostedtb9df92d2011-04-28 20:32:08 -04003795
3796 if (unlikely(ftrace_disabled))
3797 goto out_unlock;
3798
Steven Rostedt (VMware)f79b3f32019-02-11 15:00:48 -05003799 if (func_g.type == MATCH_INDEX) {
3800 found = add_rec_by_index(hash, &func_g, clear_filter);
3801 goto out_unlock;
3802 }
3803
Steven Rostedt265c8312009-02-13 12:43:56 -05003804 do_for_each_ftrace_rec(pg, rec) {
Steven Rostedt (Red Hat)546fece2016-11-14 16:31:49 -05003805
3806 if (rec->flags & FTRACE_FL_DISABLED)
3807 continue;
3808
Dmitry Safonov0b507e12015-09-29 19:46:15 +03003809 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
Dmitry Safonovf0a3b152015-09-29 19:46:13 +03003810 ret = enter_record(hash, rec, clear_filter);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04003811 if (ret < 0) {
3812 found = ret;
3813 goto out_unlock;
3814 }
Li Zefan311d16d2009-12-08 11:15:11 +08003815 found = 1;
Steven Rostedt265c8312009-02-13 12:43:56 -05003816 }
3817 } while_for_each_ftrace_rec();
Steven Rostedtb9df92d2011-04-28 20:32:08 -04003818 out_unlock:
Steven Rostedt52baf112009-02-14 01:15:39 -05003819 mutex_unlock(&ftrace_lock);
Li Zefan311d16d2009-12-08 11:15:11 +08003820
3821 return found;
Steven Rostedt5072c592008-05-12 21:20:43 +02003822}
3823
Steven Rostedt64e7c442009-02-13 17:08:48 -05003824static int
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003825ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
Steven Rostedt64e7c442009-02-13 17:08:48 -05003826{
Dmitry Safonovf0a3b152015-09-29 19:46:13 +03003827 return match_records(hash, buff, len, NULL);
Steven Rostedt64e7c442009-02-13 17:08:48 -05003828}
3829
Steven Rostedt (VMware)e16b35d2017-04-04 14:46:56 -04003830static void ftrace_ops_update_code(struct ftrace_ops *ops,
3831 struct ftrace_ops_hash *old_hash)
3832{
3833 struct ftrace_ops *op;
3834
3835 if (!ftrace_enabled)
3836 return;
3837
3838 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
3839 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
3840 return;
3841 }
3842
3843 /*
3844 * If this is the shared global_ops filter, then we need to
3845 * check if there is another ops that shares it, is enabled.
3846 * If so, we still need to run the modify code.
3847 */
3848 if (ops->func_hash != &global_ops.local_hash)
3849 return;
3850
3851 do_for_each_ftrace_op(op, ftrace_ops_list) {
3852 if (op->func_hash == &global_ops.local_hash &&
3853 op->flags & FTRACE_OPS_FL_ENABLED) {
3854 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
3855 /* Only need to do this once */
3856 return;
3857 }
3858 } while_for_each_ftrace_op(op);
3859}
3860
3861static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
3862 struct ftrace_hash **orig_hash,
3863 struct ftrace_hash *hash,
3864 int enable)
3865{
3866 struct ftrace_ops_hash old_hash_ops;
3867 struct ftrace_hash *old_hash;
3868 int ret;
3869
3870 old_hash = *orig_hash;
3871 old_hash_ops.filter_hash = ops->func_hash->filter_hash;
3872 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
3873 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3874 if (!ret) {
3875 ftrace_ops_update_code(ops, &old_hash_ops);
3876 free_ftrace_hash_rcu(old_hash);
3877 }
3878 return ret;
3879}
Steven Rostedt64e7c442009-02-13 17:08:48 -05003880
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04003881static bool module_exists(const char *module)
3882{
3883 /* All modules have the symbol __this_module */
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01003884 static const char this_mod[] = "__this_module";
Salvatore Mesoraca419e9fe2018-03-30 10:53:08 +02003885 char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04003886 unsigned long val;
3887 int n;
3888
Salvatore Mesoraca419e9fe2018-03-30 10:53:08 +02003889 n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04003890
Salvatore Mesoraca419e9fe2018-03-30 10:53:08 +02003891 if (n > sizeof(modname) - 1)
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04003892 return false;
3893
3894 val = module_kallsyms_lookup_name(modname);
3895 return val != 0;
3896}
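
/*
 * Illustrative only: module_exists() keys off the one symbol every
 * loaded module contributes to kallsyms. For a module named "ext4"
 * the lookup above is effectively:
 *
 *	val = module_kallsyms_lookup_name("ext4:__this_module");
 *	// val != 0  =>  ext4 is already loaded
 */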
3897
3898static int cache_mod(struct trace_array *tr,
3899 const char *func, char *module, int enable)
3900{
3901 struct ftrace_mod_load *ftrace_mod, *n;
3902 struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
3903 int ret;
3904
3905 mutex_lock(&ftrace_lock);
3906
3907 /* We do not cache inverse filters */
3908 if (func[0] == '!') {
3909 func++;
3910 ret = -EINVAL;
3911
3912 /* Look to remove this hash */
3913 list_for_each_entry_safe(ftrace_mod, n, head, list) {
3914 if (strcmp(ftrace_mod->module, module) != 0)
3915 continue;
3916
3917 /* no func matches all */
Dan Carpenter44925df2017-07-12 10:33:40 +03003918 if (strcmp(func, "*") == 0 ||
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04003919 (ftrace_mod->func &&
3920 strcmp(ftrace_mod->func, func) == 0)) {
3921 ret = 0;
3922 free_ftrace_mod(ftrace_mod);
3923 continue;
3924 }
3925 }
3926 goto out;
3927 }
3928
3929 ret = -EINVAL;
3930 /* We only care about modules that have not been loaded yet */
3931 if (module_exists(module))
3932 goto out;
3933
3934 /* Save this string off, and execute it when the module is loaded */
3935 ret = ftrace_add_mod(tr, func, module, enable);
3936 out:
3937 mutex_unlock(&ftrace_lock);
3938
3939 return ret;
3940}
3941
Steven Rostedt (VMware)d7fbf8d2017-06-26 10:57:21 -04003942static int
3943ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3944 int reset, int enable);
3945
Arnd Bergmann69449bbd2017-07-10 10:44:03 +02003946#ifdef CONFIG_MODULES
Steven Rostedt (VMware)d7fbf8d2017-06-26 10:57:21 -04003947static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
3948 char *mod, bool enable)
3949{
3950 struct ftrace_mod_load *ftrace_mod, *n;
3951 struct ftrace_hash **orig_hash, *new_hash;
3952 LIST_HEAD(process_mods);
3953 char *func;
3954 int ret;
3955
3956 mutex_lock(&ops->func_hash->regex_lock);
3957
3958 if (enable)
3959 orig_hash = &ops->func_hash->filter_hash;
3960 else
3961 orig_hash = &ops->func_hash->notrace_hash;
3962
3963 new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
3964 *orig_hash);
3965 if (!new_hash)
Steven Rostedt (VMware)3b58a3c2017-06-28 09:09:38 -04003966 goto out; /* warn? */
Steven Rostedt (VMware)d7fbf8d2017-06-26 10:57:21 -04003967
3968 mutex_lock(&ftrace_lock);
3969
3970 list_for_each_entry_safe(ftrace_mod, n, head, list) {
3971
3972 if (strcmp(ftrace_mod->module, mod) != 0)
3973 continue;
3974
3975 if (ftrace_mod->func)
3976 func = kstrdup(ftrace_mod->func, GFP_KERNEL);
3977 else
3978 func = kstrdup("*", GFP_KERNEL);
3979
3980 if (!func) /* warn? */
3981 continue;
3982
3983 list_del(&ftrace_mod->list);
3984 list_add(&ftrace_mod->list, &process_mods);
3985
3986 /* Use the newly allocated func, as it may be "*" */
3987 kfree(ftrace_mod->func);
3988 ftrace_mod->func = func;
3989 }
3990
3991 mutex_unlock(&ftrace_lock);
3992
3993 list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
3994
3995 func = ftrace_mod->func;
3996
3997 /* Grabs ftrace_lock, which is why we have this extra step */
3998 match_records(new_hash, func, strlen(func), mod);
3999 free_ftrace_mod(ftrace_mod);
4000 }
4001
Steven Rostedt (VMware)8c08f0d2017-06-26 11:47:31 -04004002 if (enable && list_empty(head))
4003 new_hash->flags &= ~FTRACE_HASH_FL_MOD;
4004
Steven Rostedt (VMware)d7fbf8d2017-06-26 10:57:21 -04004005 mutex_lock(&ftrace_lock);
4006
4007 ret = ftrace_hash_move_and_update_ops(ops, orig_hash,
4008 new_hash, enable);
4009 mutex_unlock(&ftrace_lock);
4010
Steven Rostedt (VMware)3b58a3c2017-06-28 09:09:38 -04004011 out:
Steven Rostedt (VMware)d7fbf8d2017-06-26 10:57:21 -04004012 mutex_unlock(&ops->func_hash->regex_lock);
4013
4014 free_ftrace_hash(new_hash);
4015}
4016
4017static void process_cached_mods(const char *mod_name)
4018{
4019 struct trace_array *tr;
4020 char *mod;
4021
4022 mod = kstrdup(mod_name, GFP_KERNEL);
4023 if (!mod)
4024 return;
4025
4026 mutex_lock(&trace_types_lock);
4027 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4028 if (!list_empty(&tr->mod_trace))
4029 process_mod_list(&tr->mod_trace, tr->ops, mod, true);
4030 if (!list_empty(&tr->mod_notrace))
4031 process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4032 }
4033 mutex_unlock(&trace_types_lock);
4034
4035 kfree(mod);
4036}
Arnd Bergmann69449bbd2017-07-10 10:44:03 +02004037#endif
Steven Rostedt (VMware)d7fbf8d2017-06-26 10:57:21 -04004038
Steven Rostedtf6180772009-02-14 00:40:25 -05004039/*
4040 * We register the module command as a template to show others how
4041	 * to register a command as well.
4042 */
4043
4044static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04004045ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04004046 char *func_orig, char *cmd, char *module, int enable)
Steven Rostedtf6180772009-02-14 00:40:25 -05004047{
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04004048 char *func;
Dmitry Safonov5e3949f2015-09-29 19:46:12 +03004049 int ret;
Steven Rostedtf6180772009-02-14 00:40:25 -05004050
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04004051 /* match_records() modifies func, and we need the original */
4052 func = kstrdup(func_orig, GFP_KERNEL);
4053 if (!func)
4054 return -ENOMEM;
4055
Steven Rostedtf6180772009-02-14 00:40:25 -05004056 /*
4057 * cmd == 'mod' because we only registered this func
4058 * for the 'mod' ftrace_func_command.
4059 * But if you register one func with multiple commands,
4060 * you can tell which command was used by the cmd
4061 * parameter.
4062 */
Dmitry Safonovf0a3b152015-09-29 19:46:13 +03004063 ret = match_records(hash, func, strlen(func), module);
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04004064 kfree(func);
4065
Steven Rostedtb448c4e2011-04-29 15:12:32 -04004066 if (!ret)
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04004067 return cache_mod(tr, func_orig, module, enable);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04004068 if (ret < 0)
4069 return ret;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04004070 return 0;
Steven Rostedtf6180772009-02-14 00:40:25 -05004071}
4072
4073static struct ftrace_func_command ftrace_mod_cmd = {
4074 .name = "mod",
4075 .func = ftrace_mod_callback,
4076};
4077
4078static int __init ftrace_mod_cmd_init(void)
4079{
4080 return register_ftrace_command(&ftrace_mod_cmd);
4081}
Steven Rostedt6f415672012-10-05 12:13:07 -04004082core_initcall(ftrace_mod_cmd_init);
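
/*
 * A minimal sketch of following that template with a new command.
 * The "mycmd" name and the callback body are hypothetical, not part
 * of the kernel; the callback signature is the one implemented by
 * ftrace_mod_callback() above:
 *
 *	static int
 *	my_cmd_callback(struct trace_array *tr, struct ftrace_hash *hash,
 *			char *func, char *cmd, char *param, int enable)
 *	{
 *		// invoked for "echo '<func>:mycmd:<param>' > set_ftrace_filter"
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_callback,
 *	};
 *
 *	static int __init my_cmd_init(void)
 *	{
 *		return register_ftrace_command(&my_cmd);
 *	}
 *	core_initcall(my_cmd_init);
 */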
Steven Rostedtf6180772009-02-14 00:40:25 -05004083
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04004084static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
Steven Rostedta1e2e312011-08-09 12:50:46 -04004085 struct ftrace_ops *op, struct pt_regs *pt_regs)
Steven Rostedt59df055f2009-02-14 15:29:06 -05004086{
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004087 struct ftrace_probe_ops *probe_ops;
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004088 struct ftrace_func_probe *probe;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004089
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004090 probe = container_of(op, struct ftrace_func_probe, ops);
4091 probe_ops = probe->probe_ops;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004092
4093 /*
4094	 * Disable preemption for these calls to prevent an RCU grace
4095 * period. This syncs the hash iteration and freeing of items
4096 * on the hash. rcu_read_lock is too dangerous here.
4097 */
Steven Rostedt5168ae52010-06-03 09:36:50 -04004098 preempt_disable_notrace();
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04004099 probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
Steven Rostedt5168ae52010-06-03 09:36:50 -04004100 preempt_enable_notrace();
Steven Rostedt59df055f2009-02-14 15:29:06 -05004101}
4102
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004103struct ftrace_func_map {
4104 struct ftrace_func_entry entry;
4105 void *data;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004106};
4107
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004108struct ftrace_func_mapper {
4109 struct ftrace_hash hash;
4110};
Steven Rostedt59df055f2009-02-14 15:29:06 -05004111
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004112/**
4113 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
4114 *
4115	 * Returns an ftrace_func_mapper descriptor that can be used to map ips to data.
4116 */
4117struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
Steven Rostedt59df055f2009-02-14 15:29:06 -05004118{
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004119 struct ftrace_hash *hash;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004120
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004121 /*
4122	 * The mapper is simply an ftrace_hash, but since the entries
4123 * in the hash are not ftrace_func_entry type, we define it
4124 * as a separate structure.
4125 */
4126 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4127 return (struct ftrace_func_mapper *)hash;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004128}
4129
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004130/**
4131 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
4132 * @mapper: The mapper that has the ip maps
4133 * @ip: the instruction pointer to find the data for
4134 *
4135	 * Returns the data mapped to @ip if found, otherwise NULL. The return
4136 * is actually the address of the mapper data pointer. The address is
4137 * returned for use cases where the data is no bigger than a long, and
4138 * the user can use the data pointer as its data instead of having to
4139 * allocate more memory for the reference.
4140 */
4141void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
4142 unsigned long ip)
Steven Rostedt59df055f2009-02-14 15:29:06 -05004143{
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004144 struct ftrace_func_entry *entry;
4145 struct ftrace_func_map *map;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004146
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004147 entry = ftrace_lookup_ip(&mapper->hash, ip);
4148 if (!entry)
4149 return NULL;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004150
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004151 map = (struct ftrace_func_map *)entry;
4152 return &map->data;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004153}
4154
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004155/**
4156 * ftrace_func_mapper_add_ip - Map some data to an ip
4157 * @mapper: The mapper that has the ip maps
4158 * @ip: The instruction pointer address to map @data to
4159 * @data: The data to map to @ip
4160 *
4161	 * Returns 0 on success, otherwise an error.
4162 */
4163int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
4164 unsigned long ip, void *data)
Steven Rostedt59df055f2009-02-14 15:29:06 -05004165{
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004166 struct ftrace_func_entry *entry;
4167 struct ftrace_func_map *map;
4168
4169 entry = ftrace_lookup_ip(&mapper->hash, ip);
4170 if (entry)
4171 return -EBUSY;
4172
4173 map = kmalloc(sizeof(*map), GFP_KERNEL);
4174 if (!map)
4175 return -ENOMEM;
4176
4177 map->entry.ip = ip;
4178 map->data = data;
4179
4180 __add_hash_entry(&mapper->hash, &map->entry);
4181
4182 return 0;
4183}
4184
4185/**
4186 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
4187 * @mapper: The mapper that has the ip maps
4188 * @ip: The instruction pointer address to remove the data from
4189 *
4190 * Returns the data if it is found, otherwise NULL.
4191	 * Note, if the data pointer is used as the data itself (see
4192	 * ftrace_func_mapper_find_ip()), then the return value may be meaningless
4193	 * if the data pointer was set to zero.
4194 */
4195void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
4196 unsigned long ip)
4197{
4198 struct ftrace_func_entry *entry;
4199 struct ftrace_func_map *map;
4200 void *data;
4201
4202 entry = ftrace_lookup_ip(&mapper->hash, ip);
4203 if (!entry)
4204 return NULL;
4205
4206 map = (struct ftrace_func_map *)entry;
4207 data = map->data;
4208
4209 remove_hash_entry(&mapper->hash, entry);
Steven Rostedt59df055f2009-02-14 15:29:06 -05004210 kfree(entry);
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004211
4212 return data;
4213}
4214
4215/**
4216 * free_ftrace_func_mapper - free a mapping of ips and data
4217 * @mapper: The mapper that has the ip maps
4218 * @free_func: A function to be called on each data item.
4219 *
4220 * This is used to free the function mapper. The @free_func is optional
4221 * and can be used if the data needs to be freed as well.
4222 */
4223void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
4224 ftrace_mapper_func free_func)
4225{
4226 struct ftrace_func_entry *entry;
4227 struct ftrace_func_map *map;
4228 struct hlist_head *hhd;
Wei Li04e03d92019-06-06 11:17:54 +08004229 int size, i;
4230
4231 if (!mapper)
4232 return;
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004233
4234 if (free_func && mapper->hash.count) {
Wei Li04e03d92019-06-06 11:17:54 +08004235 size = 1 << mapper->hash.size_bits;
Steven Rostedt (VMware)41794f12017-04-03 20:58:35 -04004236 for (i = 0; i < size; i++) {
4237 hhd = &mapper->hash.buckets[i];
4238 hlist_for_each_entry(entry, hhd, hlist) {
4239 map = (struct ftrace_func_map *)entry;
4240 free_func(map);
4241 }
4242 }
4243 }
4244 free_ftrace_hash(&mapper->hash);
4245}
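
/*
 * A condensed lifecycle sketch for the mapper API above (illustrative
 * only; my_free() is a hypothetical teardown helper). This mirrors how
 * the function-probe commands keep a per-ip countdown in the data
 * pointer itself:
 *
 *	struct ftrace_func_mapper *mapper;
 *	void **data;
 *
 *	mapper = allocate_ftrace_func_mapper();
 *	if (!mapper)
 *		return -ENOMEM;
 *
 *	// store a small count directly in the pointer slot for this ip
 *	ret = ftrace_func_mapper_add_ip(mapper, ip, (void *)5);
 *
 *	// in the probe callback: find it and decrement it in place
 *	data = ftrace_func_mapper_find_ip(mapper, ip);
 *	if (data && *data)
 *		*data = (void *)((unsigned long)*data - 1);
 *
 *	// teardown; my_free() is only needed when real allocations
 *	// hang off each entry
 *	free_ftrace_func_mapper(mapper, my_free);
 */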
4246
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004247static void release_probe(struct ftrace_func_probe *probe)
4248{
4249 struct ftrace_probe_ops *probe_ops;
4250
4251 mutex_lock(&ftrace_lock);
4252
4253 WARN_ON(probe->ref <= 0);
4254
4255 /* Subtract the ref that was used to protect this instance */
4256 probe->ref--;
4257
4258 if (!probe->ref) {
4259 probe_ops = probe->probe_ops;
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04004260 /*
4261 * Sending zero as ip tells probe_ops to free
4262 * the probe->data itself
4263 */
4264 if (probe_ops->free)
4265 probe_ops->free(probe_ops, probe->tr, 0, probe->data);
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004266 list_del(&probe->list);
4267 kfree(probe);
4268 }
4269 mutex_unlock(&ftrace_lock);
4270}
4271
4272static void acquire_probe_locked(struct ftrace_func_probe *probe)
4273{
4274 /*
4275 * Add one ref to keep it from being freed when releasing the
4276 * ftrace_lock mutex.
4277 */
4278 probe->ref++;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004279}
4280
Steven Rostedt59df055f2009-02-14 15:29:06 -05004281int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04004282register_ftrace_function_probe(char *glob, struct trace_array *tr,
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004283 struct ftrace_probe_ops *probe_ops,
4284 void *data)
Steven Rostedt59df055f2009-02-14 15:29:06 -05004285{
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004286 struct ftrace_func_entry *entry;
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004287 struct ftrace_func_probe *probe;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004288 struct ftrace_hash **orig_hash;
4289 struct ftrace_hash *old_hash;
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004290 struct ftrace_hash *hash;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004291 int count = 0;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004292 int size;
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004293 int ret;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004294 int i;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004295
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04004296 if (WARN_ON(!tr))
Steven Rostedt59df055f2009-02-14 15:29:06 -05004297 return -EINVAL;
4298
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004299 /* We do not support '!' for function probes */
4300 if (WARN_ON(glob[0] == '!'))
Steven Rostedt59df055f2009-02-14 15:29:06 -05004301 return -EINVAL;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004302
Steven Rostedt (Red Hat)7485058e2015-01-13 14:03:38 -05004303
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004304 mutex_lock(&ftrace_lock);
4305 /* Check if the probe_ops is already registered */
4306 list_for_each_entry(probe, &tr->func_probes, list) {
4307 if (probe->probe_ops == probe_ops)
4308 break;
4309 }
4310 if (&probe->list == &tr->func_probes) {
4311 probe = kzalloc(sizeof(*probe), GFP_KERNEL);
4312 if (!probe) {
4313 mutex_unlock(&ftrace_lock);
4314 return -ENOMEM;
4315 }
4316 probe->probe_ops = probe_ops;
4317 probe->ops.func = function_trace_probe_call;
4318 probe->tr = tr;
4319 ftrace_ops_init(&probe->ops);
4320 list_add(&probe->list, &tr->func_probes);
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004321 }
4322
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004323 acquire_probe_locked(probe);
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004324
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004325 mutex_unlock(&ftrace_lock);
4326
4327 mutex_lock(&probe->ops.func_hash->regex_lock);
4328
4329 orig_hash = &probe->ops.func_hash->filter_hash;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004330 old_hash = *orig_hash;
4331 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4332
4333 ret = ftrace_match_records(hash, glob, strlen(glob));
4334
4335 /* Nothing found? */
4336 if (!ret)
4337 ret = -EINVAL;
4338
4339 if (ret < 0)
Steven Rostedt (Red Hat)5ae0bf52013-05-09 18:20:37 -04004340 goto out;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004341
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004342 size = 1 << hash->size_bits;
4343 for (i = 0; i < size; i++) {
4344 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4345 if (ftrace_lookup_ip(old_hash, entry->ip))
4346 continue;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004347 /*
4348 * The caller might want to do something special
4349 * for each function we find. We call the callback
4350 * to give the caller an opportunity to do so.
4351 */
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004352 if (probe_ops->init) {
4353 ret = probe_ops->init(probe_ops, tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04004354 entry->ip, data,
4355 &probe->data);
4356 if (ret < 0) {
4357 if (probe_ops->free && count)
4358 probe_ops->free(probe_ops, tr,
4359 0, probe->data);
4360 probe->data = NULL;
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004361 goto out;
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04004362 }
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004363 }
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004364 count++;
4365 }
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004366 }
Steven Rostedt45a4a232011-04-21 23:16:46 -04004367
Steven Rostedt (Red Hat)5ae0bf52013-05-09 18:20:37 -04004368 mutex_lock(&ftrace_lock);
4369
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004370 if (!count) {
4371 /* Nothing was added? */
4372 ret = -EINVAL;
4373 goto out_unlock;
4374 }
Steven Rostedt59df055f2009-02-14 15:29:06 -05004375
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004376 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4377 hash, 1);
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004378 if (ret < 0)
Steven Rostedt (VMware)8d707252017-04-05 13:36:18 -04004379 goto err_unlock;
Steven Rostedt (Red Hat)546fece2016-11-14 16:31:49 -05004380
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004381 /* One ref for each new function traced */
4382 probe->ref += count;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004383
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004384 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
4385 ret = ftrace_startup(&probe->ops, 0);
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004386
Steven Rostedt59df055f2009-02-14 15:29:06 -05004387 out_unlock:
Steven Rostedt (Red Hat)5ae0bf52013-05-09 18:20:37 -04004388 mutex_unlock(&ftrace_lock);
Steven Rostedt59df055f2009-02-14 15:29:06 -05004389
4390 if (!ret)
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004391 ret = count;
Steven Rostedt (Red Hat)5ae0bf52013-05-09 18:20:37 -04004392 out:
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004393 mutex_unlock(&probe->ops.func_hash->regex_lock);
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004394 free_ftrace_hash(hash);
Steven Rostedt59df055f2009-02-14 15:29:06 -05004395
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004396 release_probe(probe);
4397
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004398 return ret;
Steven Rostedt (VMware)8d707252017-04-05 13:36:18 -04004399
4400 err_unlock:
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004401 if (!probe_ops->free || !count)
Steven Rostedt (VMware)8d707252017-04-05 13:36:18 -04004402 goto out_unlock;
4403
4404 /* Failed to do the move, need to call the free functions */
4405 for (i = 0; i < size; i++) {
4406 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4407 if (ftrace_lookup_ip(old_hash, entry->ip))
4408 continue;
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04004409 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
Steven Rostedt (VMware)8d707252017-04-05 13:36:18 -04004410 }
4411 }
4412 goto out_unlock;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004413}
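
/*
 * A caller-side sketch (names are hypothetical; the in-tree users are
 * the traceon/traceoff/stacktrace commands in trace_functions.c and
 * the event probes in trace_events.c). The ->func signature is the
 * one function_trace_probe_call() invokes above:
 *
 *	static void
 *	my_probe_func(unsigned long ip, unsigned long parent_ip,
 *		      struct trace_array *tr, struct ftrace_probe_ops *ops,
 *		      void *data)
 *	{
 *		// runs with preemption disabled on every matched hit
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func	= my_probe_func,
 *	};
 *
 *	// hook every function matching "vfs_*"; on success the return
 *	// value is the number of functions attached
 *	ret = register_ftrace_function_probe("vfs_*", tr, &my_probe_ops, NULL);
 */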
4414
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04004415int
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004416unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
4417 struct ftrace_probe_ops *probe_ops)
Steven Rostedt59df055f2009-02-14 15:29:06 -05004418{
Steven Rostedt (VMware)82cc4fc2017-04-14 17:45:45 -04004419 struct ftrace_ops_hash old_hash_ops;
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004420 struct ftrace_func_entry *entry;
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004421 struct ftrace_func_probe *probe;
Dmitry Safonov3ba00922015-09-29 19:46:14 +03004422 struct ftrace_glob func_g;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004423 struct ftrace_hash **orig_hash;
4424 struct ftrace_hash *old_hash;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004425 struct ftrace_hash *hash = NULL;
Sasha Levinb67bfe02013-02-27 17:06:00 -08004426 struct hlist_node *tmp;
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004427 struct hlist_head hhd;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004428 char str[KSYM_SYMBOL_LEN];
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004429 int count = 0;
4430 int i, ret = -ENODEV;
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004431 int size;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004432
Naveen N. Raocbab5672017-05-16 23:21:25 +05304433 if (!glob || !strlen(glob) || !strcmp(glob, "*"))
Dmitry Safonov3ba00922015-09-29 19:46:14 +03004434 func_g.search = NULL;
Naveen N. Raocbab5672017-05-16 23:21:25 +05304435 else {
Steven Rostedt59df055f2009-02-14 15:29:06 -05004436 int not;
4437
Dmitry Safonov3ba00922015-09-29 19:46:14 +03004438 func_g.type = filter_parse_regex(glob, strlen(glob),
4439 &func_g.search, &not);
4440 func_g.len = strlen(func_g.search);
Steven Rostedt59df055f2009-02-14 15:29:06 -05004441
Steven Rostedtb6887d72009-02-17 12:32:04 -05004442 /* we do not support '!' for function probes */
Steven Rostedt59df055f2009-02-14 15:29:06 -05004443 if (WARN_ON(not))
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04004444 return -EINVAL;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004445 }
4446
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004447 mutex_lock(&ftrace_lock);
4448 /* Check if the probe_ops is already registered */
4449 list_for_each_entry(probe, &tr->func_probes, list) {
4450 if (probe->probe_ops == probe_ops)
4451 break;
4452 }
4453 if (&probe->list == &tr->func_probes)
4454 goto err_unlock_ftrace;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004455
4456 ret = -EINVAL;
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004457 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
4458 goto err_unlock_ftrace;
4459
4460 acquire_probe_locked(probe);
4461
4462 mutex_unlock(&ftrace_lock);
4463
4464 mutex_lock(&probe->ops.func_hash->regex_lock);
4465
4466 orig_hash = &probe->ops.func_hash->filter_hash;
4467 old_hash = *orig_hash;
4468
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004469 if (ftrace_hash_empty(old_hash))
4470 goto out_unlock;
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004471
Steven Rostedt (VMware)82cc4fc2017-04-14 17:45:45 -04004472 old_hash_ops.filter_hash = old_hash;
4473 /* Probes only have filters */
4474 old_hash_ops.notrace_hash = NULL;
4475
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04004476 ret = -ENOMEM;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004477 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004478 if (!hash)
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004479 goto out_unlock;
4480
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004481 INIT_HLIST_HEAD(&hhd);
Steven Rostedt (Red Hat)7818b382013-03-13 12:42:58 -04004482
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004483 size = 1 << hash->size_bits;
4484 for (i = 0; i < size; i++) {
4485 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
Steven Rostedt59df055f2009-02-14 15:29:06 -05004486
Dmitry Safonov3ba00922015-09-29 19:46:14 +03004487 if (func_g.search) {
Steven Rostedt59df055f2009-02-14 15:29:06 -05004488 kallsyms_lookup(entry->ip, NULL, NULL,
4489 NULL, str);
Dmitry Safonov3ba00922015-09-29 19:46:14 +03004490 if (!ftrace_match(str, &func_g))
Steven Rostedt59df055f2009-02-14 15:29:06 -05004491 continue;
4492 }
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004493 count++;
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004494 remove_hash_entry(hash, entry);
4495 hlist_add_head(&entry->hlist, &hhd);
Steven Rostedt59df055f2009-02-14 15:29:06 -05004496 }
4497 }
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04004498
4499 /* Nothing found? */
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004500 if (!count) {
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04004501 ret = -EINVAL;
4502 goto out_unlock;
4503 }
4504
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004505 mutex_lock(&ftrace_lock);
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004506
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004507 WARN_ON(probe->ref < count);
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004508
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004509 probe->ref -= count;
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004510
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004511 if (ftrace_hash_empty(hash))
4512 ftrace_shutdown(&probe->ops, 0);
4513
4514 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004515 hash, 1);
Steven Rostedt (VMware)82cc4fc2017-04-14 17:45:45 -04004516
4517 /* still need to update the function call sites */
Steven Rostedt (VMware)1ec3a812017-04-04 18:16:29 -04004518 if (ftrace_enabled && !ftrace_hash_empty(hash))
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004519 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
Steven Rostedt (VMware)82cc4fc2017-04-14 17:45:45 -04004520 &old_hash_ops);
Paul E. McKenney74401722018-11-06 18:44:52 -08004521 synchronize_rcu();
Steven Rostedt (Red Hat)3296fc42014-07-24 15:33:41 -04004522
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004523 hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
4524 hlist_del(&entry->hlist);
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004525 if (probe_ops->free)
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04004526 probe_ops->free(probe_ops, tr, entry->ip, probe->data);
Steven Rostedt (VMware)eee8ded2017-04-04 21:31:28 -04004527 kfree(entry);
Steven Rostedt (Red Hat)7818b382013-03-13 12:42:58 -04004528 }
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004529 mutex_unlock(&ftrace_lock);
Dmitry Safonov3ba00922015-09-29 19:46:14 +03004530
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004531 out_unlock:
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004532 mutex_unlock(&probe->ops.func_hash->regex_lock);
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04004533 free_ftrace_hash(hash);
Steven Rostedt59df055f2009-02-14 15:29:06 -05004534
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004535 release_probe(probe);
Steven Rostedt59df055f2009-02-14 15:29:06 -05004536
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004537 return ret;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004538
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04004539 err_unlock_ftrace:
4540 mutex_unlock(&ftrace_lock);
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04004541 return ret;
Steven Rostedt59df055f2009-02-14 15:29:06 -05004542}
4543
Naveen N. Raoa0e63692017-05-16 23:21:26 +05304544void clear_ftrace_function_probes(struct trace_array *tr)
4545{
4546 struct ftrace_func_probe *probe, *n;
4547
4548 list_for_each_entry_safe(probe, n, &tr->func_probes, list)
4549 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
4550}
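
/*
 * Counterpart to the registration sketch above: detaching the same
 * hypothetical probe. A NULL or "*" glob strips every function the
 * probe_ops is attached to, which is what clear_ftrace_function_probes()
 * relies on:
 *
 *	unregister_ftrace_function_probe_func("vfs_*", tr, &my_probe_ops);
 */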
4551
Steven Rostedtf6180772009-02-14 00:40:25 -05004552static LIST_HEAD(ftrace_commands);
4553static DEFINE_MUTEX(ftrace_cmd_mutex);
4554
Tom Zanussi38de93a2013-10-24 08:34:18 -05004555/*
4556 * Currently we only register ftrace commands from __init, so mark this
4557 * __init too.
4558 */
4559__init int register_ftrace_command(struct ftrace_func_command *cmd)
Steven Rostedtf6180772009-02-14 00:40:25 -05004560{
4561 struct ftrace_func_command *p;
4562 int ret = 0;
4563
4564 mutex_lock(&ftrace_cmd_mutex);
4565 list_for_each_entry(p, &ftrace_commands, list) {
4566 if (strcmp(cmd->name, p->name) == 0) {
4567 ret = -EBUSY;
4568 goto out_unlock;
4569 }
4570 }
4571 list_add(&cmd->list, &ftrace_commands);
4572 out_unlock:
4573 mutex_unlock(&ftrace_cmd_mutex);
4574
4575 return ret;
4576}
4577
Tom Zanussi38de93a2013-10-24 08:34:18 -05004578/*
4579 * Currently we only unregister ftrace commands from __init, so mark
4580 * this __init too.
4581 */
4582__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
Steven Rostedtf6180772009-02-14 00:40:25 -05004583{
4584 struct ftrace_func_command *p, *n;
4585 int ret = -ENODEV;
4586
4587 mutex_lock(&ftrace_cmd_mutex);
4588 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
4589 if (strcmp(cmd->name, p->name) == 0) {
4590 ret = 0;
4591 list_del_init(&p->list);
4592 goto out_unlock;
4593 }
4594 }
4595 out_unlock:
4596 mutex_unlock(&ftrace_cmd_mutex);
4597
4598 return ret;
4599}
4600
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04004601static int ftrace_process_regex(struct ftrace_iterator *iter,
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004602 char *buff, int len, int enable)
Steven Rostedt64e7c442009-02-13 17:08:48 -05004603{
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04004604 struct ftrace_hash *hash = iter->hash;
Steven Rostedt (VMware)d2afd57a2017-04-20 11:31:35 -04004605 struct trace_array *tr = iter->ops->private;
Steven Rostedtf6180772009-02-14 00:40:25 -05004606 char *func, *command, *next = buff;
Steven Rostedt6a24a242009-02-17 11:20:26 -05004607 struct ftrace_func_command *p;
GuoWen Li0aff1c02011-06-01 19:18:47 +08004608 int ret = -EINVAL;
Steven Rostedt64e7c442009-02-13 17:08:48 -05004609
4610 func = strsep(&next, ":");
4611
4612 if (!next) {
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04004613 ret = ftrace_match_records(hash, func, len);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04004614 if (!ret)
4615 ret = -EINVAL;
4616 if (ret < 0)
4617 return ret;
4618 return 0;
Steven Rostedt64e7c442009-02-13 17:08:48 -05004619 }
4620
Steven Rostedtf6180772009-02-14 00:40:25 -05004621 /* command found */
Steven Rostedt64e7c442009-02-13 17:08:48 -05004622
4623 command = strsep(&next, ":");
4624
Steven Rostedtf6180772009-02-14 00:40:25 -05004625 mutex_lock(&ftrace_cmd_mutex);
4626 list_for_each_entry(p, &ftrace_commands, list) {
4627 if (strcmp(p->name, command) == 0) {
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04004628 ret = p->func(tr, hash, func, command, next, enable);
Steven Rostedtf6180772009-02-14 00:40:25 -05004629 goto out_unlock;
4630 }
Steven Rostedt64e7c442009-02-13 17:08:48 -05004631 }
Steven Rostedtf6180772009-02-14 00:40:25 -05004632 out_unlock:
4633 mutex_unlock(&ftrace_cmd_mutex);
Steven Rostedt64e7c442009-02-13 17:08:48 -05004634
Steven Rostedtf6180772009-02-14 00:40:25 -05004635 return ret;
Steven Rostedt64e7c442009-02-13 17:08:48 -05004636}
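
/*
 * Shell-level examples of the strings this parser receives, one per
 * write (the traceoff form is taken from Documentation/trace/ftrace.rst):
 *
 *	echo 'vfs_read' > set_ftrace_filter			# no ':', plain glob
 *	echo '__schedule_bug:traceoff:5' > set_ftrace_filter	# func:command:param
 */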
4637
Ingo Molnare309b412008-05-12 21:20:51 +02004638static ssize_t
Steven Rostedt41c52c02008-05-22 11:46:33 -04004639ftrace_regex_write(struct file *file, const char __user *ubuf,
4640 size_t cnt, loff_t *ppos, int enable)
Steven Rostedt5072c592008-05-12 21:20:43 +02004641{
4642 struct ftrace_iterator *iter;
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004643 struct trace_parser *parser;
4644 ssize_t ret, read;
Steven Rostedt5072c592008-05-12 21:20:43 +02004645
Li Zefan4ba79782009-09-22 13:52:20 +08004646 if (!cnt)
Steven Rostedt5072c592008-05-12 21:20:43 +02004647 return 0;
4648
Steven Rostedt5072c592008-05-12 21:20:43 +02004649 if (file->f_mode & FMODE_READ) {
4650 struct seq_file *m = file->private_data;
4651 iter = m->private;
4652 } else
4653 iter = file->private_data;
4654
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09004655 if (unlikely(ftrace_disabled))
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004656 return -ENODEV;
4657
4658 /* iter->hash is a local copy, so we don't need regex_lock */
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09004659
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004660 parser = &iter->parser;
4661 read = trace_get_user(parser, ubuf, cnt, ppos);
Steven Rostedt5072c592008-05-12 21:20:43 +02004662
Li Zefan4ba79782009-09-22 13:52:20 +08004663 if (read >= 0 && trace_parser_loaded(parser) &&
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004664 !trace_parser_cont(parser)) {
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04004665 ret = ftrace_process_regex(iter, parser->buffer,
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004666 parser->idx, enable);
Li Zefan313254a2009-12-08 11:15:30 +08004667 trace_parser_clear(parser);
Steven Rostedt (Red Hat)7c088b52013-05-09 11:35:12 -04004668 if (ret < 0)
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004669 goto out;
Steven Rostedt5072c592008-05-12 21:20:43 +02004670 }
4671
Steven Rostedt5072c592008-05-12 21:20:43 +02004672 ret = read;
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004673 out:
Steven Rostedt5072c592008-05-12 21:20:43 +02004674 return ret;
4675}
4676
Steven Rostedtfc13cb02011-12-19 14:41:25 -05004677ssize_t
Steven Rostedt41c52c02008-05-22 11:46:33 -04004678ftrace_filter_write(struct file *file, const char __user *ubuf,
4679 size_t cnt, loff_t *ppos)
4680{
4681 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
4682}
4683
Steven Rostedtfc13cb02011-12-19 14:41:25 -05004684ssize_t
Steven Rostedt41c52c02008-05-22 11:46:33 -04004685ftrace_notrace_write(struct file *file, const char __user *ubuf,
4686 size_t cnt, loff_t *ppos)
4687{
4688 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
4689}
4690
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004691static int
Masami Hiramatsu647664e2012-06-05 19:28:08 +09004692ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
4693{
4694 struct ftrace_func_entry *entry;
4695
4696 if (!ftrace_location(ip))
4697 return -EINVAL;
4698
4699 if (remove) {
4700 entry = ftrace_lookup_ip(hash, ip);
4701 if (!entry)
4702 return -ENOENT;
4703 free_hash_entry(hash, entry);
4704 return 0;
4705 }
4706
4707 return add_hash_entry(hash, ip);
4708}
4709
4710static int
4711ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
4712 unsigned long ip, int remove, int reset, int enable)
Steven Rostedt41c52c02008-05-22 11:46:33 -04004713{
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004714 struct ftrace_hash **orig_hash;
Steven Rostedtf45948e2011-05-02 12:29:25 -04004715 struct ftrace_hash *hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004716 int ret;
Steven Rostedtf45948e2011-05-02 12:29:25 -04004717
Steven Rostedt41c52c02008-05-22 11:46:33 -04004718 if (unlikely(ftrace_disabled))
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004719 return -ENODEV;
Steven Rostedt41c52c02008-05-22 11:46:33 -04004720
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04004721 mutex_lock(&ops->func_hash->regex_lock);
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004722
Steven Rostedtf45948e2011-05-02 12:29:25 -04004723 if (enable)
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04004724 orig_hash = &ops->func_hash->filter_hash;
Steven Rostedtf45948e2011-05-02 12:29:25 -04004725 else
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04004726 orig_hash = &ops->func_hash->notrace_hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004727
Wang Nanb972cc52014-07-15 08:40:20 +08004728 if (reset)
4729 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4730 else
4731 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
4732
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004733 if (!hash) {
4734 ret = -ENOMEM;
4735 goto out_regex_unlock;
4736 }
Steven Rostedtf45948e2011-05-02 12:29:25 -04004737
Jiri Olsaac483c42012-01-02 10:04:14 +01004738 if (buf && !ftrace_match_records(hash, buf, len)) {
4739 ret = -EINVAL;
4740 goto out_regex_unlock;
4741 }
Masami Hiramatsu647664e2012-06-05 19:28:08 +09004742 if (ip) {
4743 ret = ftrace_match_addr(hash, ip, remove);
4744 if (ret < 0)
4745 goto out_regex_unlock;
4746 }
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004747
4748 mutex_lock(&ftrace_lock);
Steven Rostedt (VMware)e16b35d2017-04-04 14:46:56 -04004749 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004750 mutex_unlock(&ftrace_lock);
4751
Jiri Olsaac483c42012-01-02 10:04:14 +01004752 out_regex_unlock:
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04004753 mutex_unlock(&ops->func_hash->regex_lock);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004754
4755 free_ftrace_hash(hash);
4756 return ret;
Steven Rostedt41c52c02008-05-22 11:46:33 -04004757}
4758
Masami Hiramatsu647664e2012-06-05 19:28:08 +09004759static int
4760ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
4761 int reset, int enable)
4762{
Hariprasad Kelam9efb85c2019-03-24 00:05:23 +05304763 return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
Masami Hiramatsu647664e2012-06-05 19:28:08 +09004764}
4765
4766/**
4767 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
4768 * @ops - the ops to set the filter with
4769 * @ip - the address to add to or remove from the filter.
4770 * @remove - non zero to remove the ip from the filter
4771 * @reset - non zero to reset all filters before applying this filter.
4772 *
4773	 * Filters denote which functions should be enabled when tracing is enabled.
4774	 * If @ip is NULL, it fails to update the filter.
4775 */
4776int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
4777 int remove, int reset)
4778{
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09004779 ftrace_ops_init(ops);
Masami Hiramatsu647664e2012-06-05 19:28:08 +09004780 return ftrace_set_addr(ops, ip, remove, reset, 1);
4781}
4782EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
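
/*
 * Illustrative sketch (my_ops is hypothetical): attach an ops to a
 * single function by address rather than by name, the way kprobes
 * uses this interface:
 *
 *	unsigned long ip = kallsyms_lookup_name("vfs_read");
 *
 *	ret = ftrace_set_filter_ip(&my_ops, ip, 0, 1);	// reset, then add ip
 *	if (!ret)
 *		ret = register_ftrace_function(&my_ops);
 */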
4783
Joel Fernandesd032ae82016-11-15 12:31:20 -08004784/**
4785 * ftrace_ops_set_global_filter - setup ops to use global filters
4786 * @ops - the ops which will use the global filters
4787 *
4788 * ftrace users who need global function trace filtering should call this.
4789 * It can set the global filter only if ops were not initialized before.
4790 */
4791void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
4792{
4793 if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
4794 return;
4795
4796 ftrace_ops_init(ops);
4797 ops->func_hash = &global_ops.local_hash;
4798}
4799EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
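
/*
 * Illustrative only: an ops that wants to honor the global
 * set_ftrace_filter/set_ftrace_notrace files would opt in before
 * registering:
 *
 *	ftrace_ops_set_global_filter(&my_ops);
 *	register_ftrace_function(&my_ops);
 */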
4800
Masami Hiramatsu647664e2012-06-05 19:28:08 +09004801static int
4802ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4803 int reset, int enable)
4804{
4805 return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
4806}
4807
Steven Rostedt77a2b372008-05-12 21:20:45 +02004808/**
4809 * ftrace_set_filter - set a function to filter on in ftrace
Steven Rostedt936e0742011-05-05 22:54:01 -04004810 * @ops - the ops to set the filter with
Steven Rostedt77a2b372008-05-12 21:20:45 +02004811 * @buf - the string that holds the function filter text.
4812 * @len - the length of the string.
4813 * @reset - non zero to reset all filters before applying this filter.
4814 *
4815 * Filters denote which functions should be enabled when tracing is enabled.
4816 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4817 */
Jiri Olsaac483c42012-01-02 10:04:14 +01004818int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
Steven Rostedt936e0742011-05-05 22:54:01 -04004819 int len, int reset)
Steven Rostedt77a2b372008-05-12 21:20:45 +02004820{
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09004821 ftrace_ops_init(ops);
Jiri Olsaac483c42012-01-02 10:04:14 +01004822 return ftrace_set_regex(ops, buf, len, reset, 1);
Steven Rostedt41c52c02008-05-22 11:46:33 -04004823}
Steven Rostedt936e0742011-05-05 22:54:01 -04004824EXPORT_SYMBOL_GPL(ftrace_set_filter);
Steven Rostedt4eebcc82008-05-12 21:20:48 +02004825
Steven Rostedt41c52c02008-05-22 11:46:33 -04004826/**
4827 * ftrace_set_notrace - set a function to not trace in ftrace
Steven Rostedt936e0742011-05-05 22:54:01 -04004828 * @ops - the ops to set the notrace filter with
Steven Rostedt41c52c02008-05-22 11:46:33 -04004829 * @buf - the string that holds the function notrace text.
4830 * @len - the length of the string.
4831 * @reset - non zero to reset all filters before applying this filter.
4832 *
4833 * Notrace Filters denote which functions should not be enabled when tracing
4834 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4835 * for tracing.
4836 */
Jiri Olsaac483c42012-01-02 10:04:14 +01004837int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
Steven Rostedt936e0742011-05-05 22:54:01 -04004838 int len, int reset)
4839{
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09004840 ftrace_ops_init(ops);
Jiri Olsaac483c42012-01-02 10:04:14 +01004841 return ftrace_set_regex(ops, buf, len, reset, 0);
Steven Rostedt936e0742011-05-05 22:54:01 -04004842}
4843EXPORT_SYMBOL_GPL(ftrace_set_notrace);
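
/*
 * A short sketch combining the two string interfaces above (my_ops is
 * hypothetical): trace the vfs_* family except vfs_write():
 *
 *	ftrace_set_filter(&my_ops, "vfs_*", strlen("vfs_*"), 1);
 *	ftrace_set_notrace(&my_ops, "vfs_write", strlen("vfs_write"), 1);
 *	register_ftrace_function(&my_ops);
 */
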
4844/**
Jiaxing Wang8d1b0652014-04-20 23:10:44 +08004845 * ftrace_set_global_filter - set a function to filter on with global tracers
Steven Rostedt936e0742011-05-05 22:54:01 -04004846 * @buf - the string that holds the function filter text.
4847 * @len - the length of the string.
4848 * @reset - non zero to reset all filters before applying this filter.
4849 *
4850 * Filters denote which functions should be enabled when tracing is enabled.
4851 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4852 */
4853void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
4854{
4855 ftrace_set_regex(&global_ops, buf, len, reset, 1);
4856}
4857EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
4858
4859/**
Jiaxing Wang8d1b0652014-04-20 23:10:44 +08004860 * ftrace_set_global_notrace - set a function to not trace with global tracers
Steven Rostedt936e0742011-05-05 22:54:01 -04004861 * @buf - the string that holds the function notrace text.
4862 * @len - the length of the string.
4863 * @reset - non zero to reset all filters before applying this filter.
4864 *
4865 * Notrace Filters denote which functions should not be enabled when tracing
4866 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4867 * for tracing.
4868 */
4869void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
Steven Rostedt41c52c02008-05-22 11:46:33 -04004870{
Steven Rostedtf45948e2011-05-02 12:29:25 -04004871 ftrace_set_regex(&global_ops, buf, len, reset, 0);
Steven Rostedt77a2b372008-05-12 21:20:45 +02004872}
Steven Rostedt936e0742011-05-05 22:54:01 -04004873EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
Steven Rostedt77a2b372008-05-12 21:20:45 +02004874
Steven Rostedt2af15d62009-05-28 13:37:24 -04004875/*
4876 * command line interface to allow users to set filters on boot up.
4877 */
4878#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
4879static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4880static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
4881
Steven Rostedt (Red Hat)f1ed7c72013-06-27 22:18:06 -04004882/* Used by function selftest to not test if filter is set */
4883bool ftrace_filter_param __initdata;
4884
Steven Rostedt2af15d62009-05-28 13:37:24 -04004885static int __init set_ftrace_notrace(char *str)
4886{
Steven Rostedt (Red Hat)f1ed7c72013-06-27 22:18:06 -04004887 ftrace_filter_param = true;
Chen Gang75761cc2013-04-08 12:12:39 +08004888 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
Steven Rostedt2af15d62009-05-28 13:37:24 -04004889 return 1;
4890}
4891__setup("ftrace_notrace=", set_ftrace_notrace);
4892
4893static int __init set_ftrace_filter(char *str)
4894{
Steven Rostedt (Red Hat)f1ed7c72013-06-27 22:18:06 -04004895 ftrace_filter_param = true;
Chen Gang75761cc2013-04-08 12:12:39 +08004896 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
Steven Rostedt2af15d62009-05-28 13:37:24 -04004897 return 1;
4898}
4899__setup("ftrace_filter=", set_ftrace_filter);
4900
Stefan Assmann369bc182009-10-12 22:17:21 +02004901#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Lai Jiangshanf6060f42009-11-05 11:16:17 +08004902static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
Namhyung Kim0d7d9a12014-06-13 01:23:50 +09004903static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09004904static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
Steven Rostedt801c29f2010-03-05 20:02:19 -05004905
Stefan Assmann369bc182009-10-12 22:17:21 +02004906static int __init set_graph_function(char *str)
4907{
Frederic Weisbecker06f43d62009-10-14 20:43:39 +02004908 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
Stefan Assmann369bc182009-10-12 22:17:21 +02004909 return 1;
4910}
4911__setup("ftrace_graph_filter=", set_graph_function);
4912
Namhyung Kim0d7d9a12014-06-13 01:23:50 +09004913static int __init set_graph_notrace_function(char *str)
4914{
4915 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
4916 return 1;
4917}
4918__setup("ftrace_graph_notrace=", set_graph_notrace_function);
4919
Todd Brandt65a50c652017-03-02 16:12:15 -08004920static int __init set_graph_max_depth_function(char *str)
4921{
4922 if (!str)
4923 return 0;
4924 fgraph_max_depth = simple_strtoul(str, NULL, 0);
4925 return 1;
4926}
4927__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
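
/*
 * Illustrative boot line exercising the graph parameters set up above:
 *
 *	ftrace_graph_filter=do_sys_open ftrace_graph_max_depth=5
 */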
4928
Namhyung Kim0d7d9a12014-06-13 01:23:50 +09004929static void __init set_ftrace_early_graph(char *buf, int enable)
Stefan Assmann369bc182009-10-12 22:17:21 +02004930{
4931 int ret;
4932 char *func;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09004933 struct ftrace_hash *hash;
Namhyung Kim0d7d9a12014-06-13 01:23:50 +09004934
Steven Rostedt (VMware)92ad18e2017-03-02 12:53:26 -05004935 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4936 if (WARN_ON(!hash))
4937 return;
Stefan Assmann369bc182009-10-12 22:17:21 +02004938
4939 while (buf) {
4940 func = strsep(&buf, ",");
4941 /* we allow only one expression at a time */
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09004942 ret = ftrace_graph_set_hash(hash, func);
Stefan Assmann369bc182009-10-12 22:17:21 +02004943 if (ret)
4944			printk(KERN_DEBUG "ftrace: function %s not traceable\n",
4945			       func);
4946 }
Steven Rostedt (VMware)92ad18e2017-03-02 12:53:26 -05004947
4948 if (enable)
4949 ftrace_graph_hash = hash;
4950 else
4951 ftrace_graph_notrace_hash = hash;
Stefan Assmann369bc182009-10-12 22:17:21 +02004952}
4953#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4954
Steven Rostedt2a85a372011-12-19 21:57:44 -05004955void __init
4956ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
Steven Rostedt2af15d62009-05-28 13:37:24 -04004957{
4958 char *func;
4959
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09004960 ftrace_ops_init(ops);
4961
Steven Rostedt2af15d62009-05-28 13:37:24 -04004962 while (buf) {
4963 func = strsep(&buf, ",");
Steven Rostedtf45948e2011-05-02 12:29:25 -04004964 ftrace_set_regex(ops, func, strlen(func), 0, enable);
Steven Rostedt2af15d62009-05-28 13:37:24 -04004965 }
4966}
4967
4968static void __init set_ftrace_early_filters(void)
4969{
4970 if (ftrace_filter_buf[0])
Steven Rostedt2a85a372011-12-19 21:57:44 -05004971 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
Steven Rostedt2af15d62009-05-28 13:37:24 -04004972 if (ftrace_notrace_buf[0])
Steven Rostedt2a85a372011-12-19 21:57:44 -05004973 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
Stefan Assmann369bc182009-10-12 22:17:21 +02004974#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4975 if (ftrace_graph_buf[0])
Namhyung Kim0d7d9a12014-06-13 01:23:50 +09004976 set_ftrace_early_graph(ftrace_graph_buf, 1);
4977 if (ftrace_graph_notrace_buf[0])
4978 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
Stefan Assmann369bc182009-10-12 22:17:21 +02004979#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
Steven Rostedt2af15d62009-05-28 13:37:24 -04004980}
4981
Steven Rostedtfc13cb02011-12-19 14:41:25 -05004982int ftrace_regex_release(struct inode *inode, struct file *file)
Steven Rostedt5072c592008-05-12 21:20:43 +02004983{
4984 struct seq_file *m = (struct seq_file *)file->private_data;
4985 struct ftrace_iterator *iter;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004986 struct ftrace_hash **orig_hash;
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004987 struct trace_parser *parser;
Steven Rostedted926f92011-05-03 13:25:24 -04004988 int filter_hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004989 int ret;
Steven Rostedt5072c592008-05-12 21:20:43 +02004990
Steven Rostedt5072c592008-05-12 21:20:43 +02004991 if (file->f_mode & FMODE_READ) {
4992 iter = m->private;
Steven Rostedt5072c592008-05-12 21:20:43 +02004993 seq_release(inode, file);
4994 } else
4995 iter = file->private_data;
4996
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004997 parser = &iter->parser;
4998 if (trace_parser_loaded(parser)) {
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04004999 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
Steven Rostedt5072c592008-05-12 21:20:43 +02005000 }
5001
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02005002 trace_parser_put(parser);
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02005003
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04005004 mutex_lock(&iter->ops->func_hash->regex_lock);
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09005005
Steven Rostedt058e2972011-04-29 22:35:33 -04005006 if (file->f_mode & FMODE_WRITE) {
Steven Rostedted926f92011-05-03 13:25:24 -04005007 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
5008
Steven Rostedt (VMware)8c08f0d2017-06-26 11:47:31 -04005009 if (filter_hash) {
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04005010 orig_hash = &iter->ops->func_hash->filter_hash;
Steven Rostedt (VMware)69d71872017-07-05 09:45:43 -04005011 if (iter->tr && !list_empty(&iter->tr->mod_trace))
Steven Rostedt (VMware)8c08f0d2017-06-26 11:47:31 -04005012 iter->hash->flags |= FTRACE_HASH_FL_MOD;
5013 } else
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04005014 orig_hash = &iter->ops->func_hash->notrace_hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04005015
Steven Rostedt058e2972011-04-29 22:35:33 -04005016 mutex_lock(&ftrace_lock);
Steven Rostedt (VMware)e16b35d2017-04-04 14:46:56 -04005017 ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
5018 iter->hash, filter_hash);
Steven Rostedt058e2972011-04-29 22:35:33 -04005019 mutex_unlock(&ftrace_lock);
Steven Rostedt (VMware)c20489d2017-03-29 14:55:49 -04005020 } else {
5021 /* For read only, the hash is the ops hash */
5022 iter->hash = NULL;
Steven Rostedt058e2972011-04-29 22:35:33 -04005023 }
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09005024
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04005025 mutex_unlock(&iter->ops->func_hash->regex_lock);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04005026 free_ftrace_hash(iter->hash);
5027 kfree(iter);
Steven Rostedt058e2972011-04-29 22:35:33 -04005028
Steven Rostedt5072c592008-05-12 21:20:43 +02005029 return 0;
5030}
5031
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005032static const struct file_operations ftrace_avail_fops = {
Steven Rostedt5072c592008-05-12 21:20:43 +02005033 .open = ftrace_avail_open,
5034 .read = seq_read,
5035 .llseek = seq_lseek,
Li Zefan3be04b42009-08-17 16:54:03 +08005036 .release = seq_release_private,
Steven Rostedt5072c592008-05-12 21:20:43 +02005037};
5038
Steven Rostedt647bcd02011-05-03 14:39:21 -04005039static const struct file_operations ftrace_enabled_fops = {
5040 .open = ftrace_enabled_open,
5041 .read = seq_read,
5042 .llseek = seq_lseek,
5043 .release = seq_release_private,
5044};
5045
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005046static const struct file_operations ftrace_filter_fops = {
Steven Rostedt5072c592008-05-12 21:20:43 +02005047 .open = ftrace_filter_open,
Lai Jiangshan850a80c2009-03-13 17:47:23 +08005048 .read = seq_read,
Steven Rostedt5072c592008-05-12 21:20:43 +02005049 .write = ftrace_filter_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005050 .llseek = tracing_lseek,
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04005051 .release = ftrace_regex_release,
Steven Rostedt5072c592008-05-12 21:20:43 +02005052};
5053
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005054static const struct file_operations ftrace_notrace_fops = {
Steven Rostedt41c52c02008-05-22 11:46:33 -04005055 .open = ftrace_notrace_open,
Lai Jiangshan850a80c2009-03-13 17:47:23 +08005056 .read = seq_read,
Steven Rostedt41c52c02008-05-22 11:46:33 -04005057 .write = ftrace_notrace_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005058 .llseek = tracing_lseek,
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04005059 .release = ftrace_regex_release,
Steven Rostedt41c52c02008-05-22 11:46:33 -04005060};
5061
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005062#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5063
5064static DEFINE_MUTEX(graph_lock);
5065
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005066struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH;
5067struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH;
5068
5069enum graph_filter_type {
5070 GRAPH_FILTER_NOTRACE = 0,
5071 GRAPH_FILTER_FUNCTION,
5072};
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005073
Steven Rostedt (VMware)555fc782017-02-02 10:15:22 -05005074#define FTRACE_GRAPH_EMPTY ((void *)1)
5075
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005076struct ftrace_graph_data {
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005077 struct ftrace_hash *hash;
5078 struct ftrace_func_entry *entry;
5079 int idx; /* for hash table iteration */
5080 enum graph_filter_type type;
5081 struct ftrace_hash *new_hash;
5082 const struct seq_operations *seq_ops;
5083 struct trace_parser parser;
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005084};
5085
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005086static void *
Li Zefan85951842009-06-24 09:54:00 +08005087__g_next(struct seq_file *m, loff_t *pos)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005088{
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005089 struct ftrace_graph_data *fgd = m->private;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005090 struct ftrace_func_entry *entry = fgd->entry;
5091 struct hlist_head *head;
5092 int i, idx = fgd->idx;
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005093
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005094 if (*pos >= fgd->hash->count)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005095 return NULL;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005096
5097 if (entry) {
5098 hlist_for_each_entry_continue(entry, hlist) {
5099 fgd->entry = entry;
5100 return entry;
5101 }
5102
5103 idx++;
5104 }
5105
5106 for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
5107 head = &fgd->hash->buckets[i];
5108 hlist_for_each_entry(entry, head, hlist) {
5109 fgd->entry = entry;
5110 fgd->idx = i;
5111 return entry;
5112 }
5113 }
5114 return NULL;
Li Zefan85951842009-06-24 09:54:00 +08005115}
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005116
Li Zefan85951842009-06-24 09:54:00 +08005117static void *
5118g_next(struct seq_file *m, void *v, loff_t *pos)
5119{
5120 (*pos)++;
5121 return __g_next(m, pos);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005122}
5123
5124static void *g_start(struct seq_file *m, loff_t *pos)
5125{
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005126 struct ftrace_graph_data *fgd = m->private;
5127
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005128 mutex_lock(&graph_lock);
5129
Steven Rostedt (VMware)649b9882017-02-02 20:16:29 -05005130 if (fgd->type == GRAPH_FILTER_FUNCTION)
5131 fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
5132 lockdep_is_held(&graph_lock));
5133 else
5134 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5135 lockdep_is_held(&graph_lock));
5136
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01005137	/* Nothing in the hash, tell g_show to print that all functions are enabled */
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005138 if (ftrace_hash_empty(fgd->hash) && !*pos)
Steven Rostedt (VMware)555fc782017-02-02 10:15:22 -05005139 return FTRACE_GRAPH_EMPTY;
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01005140
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005141 fgd->idx = 0;
5142 fgd->entry = NULL;
Li Zefan85951842009-06-24 09:54:00 +08005143 return __g_next(m, pos);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005144}
5145
5146static void g_stop(struct seq_file *m, void *p)
5147{
5148 mutex_unlock(&graph_lock);
5149}
5150
5151static int g_show(struct seq_file *m, void *v)
5152{
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005153 struct ftrace_func_entry *entry = v;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005154
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005155 if (!entry)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005156 return 0;
5157
Steven Rostedt (VMware)555fc782017-02-02 10:15:22 -05005158 if (entry == FTRACE_GRAPH_EMPTY) {
Namhyung Kim280d1422014-06-13 01:23:51 +09005159 struct ftrace_graph_data *fgd = m->private;
5160
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005161 if (fgd->type == GRAPH_FILTER_FUNCTION)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01005162 seq_puts(m, "#### all functions enabled ####\n");
Namhyung Kim280d1422014-06-13 01:23:51 +09005163 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01005164 seq_puts(m, "#### no functions disabled ####\n");
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01005165 return 0;
5166 }
5167
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005168 seq_printf(m, "%ps\n", (void *)entry->ip);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005169
5170 return 0;
5171}
5172
James Morris88e9d342009-09-22 16:43:43 -07005173static const struct seq_operations ftrace_graph_seq_ops = {
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005174 .start = g_start,
5175 .next = g_next,
5176 .stop = g_stop,
5177 .show = g_show,
5178};
5179
5180static int
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005181__ftrace_graph_open(struct inode *inode, struct file *file,
5182 struct ftrace_graph_data *fgd)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005183{
5184 int ret = 0;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005185 struct ftrace_hash *new_hash = NULL;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005186
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005187 if (file->f_mode & FMODE_WRITE) {
5188 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
5189
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005190 if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
5191 return -ENOMEM;
5192
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005193 if (file->f_flags & O_TRUNC)
5194 new_hash = alloc_ftrace_hash(size_bits);
5195 else
5196 new_hash = alloc_and_copy_ftrace_hash(size_bits,
5197 fgd->hash);
5198 if (!new_hash) {
5199 ret = -ENOMEM;
5200 goto out;
5201 }
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005202 }
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005203
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005204 if (file->f_mode & FMODE_READ) {
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005205 ret = seq_open(file, &ftrace_graph_seq_ops);
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005206 if (!ret) {
5207 struct seq_file *m = file->private_data;
5208 m->private = fgd;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005209 } else {
5210 /* Failed */
5211 free_ftrace_hash(new_hash);
5212 new_hash = NULL;
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005213 }
5214 } else
5215 file->private_data = fgd;
Li Zefana4ec5e02009-09-18 14:06:28 +08005216
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005217out:
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005218 if (ret < 0 && file->f_mode & FMODE_WRITE)
5219 trace_parser_put(&fgd->parser);
5220
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005221 fgd->new_hash = new_hash;
Steven Rostedt (VMware)649b9882017-02-02 20:16:29 -05005222
5223 /*
5224 * All uses of fgd->hash must be taken with the graph_lock
5225 * held. The graph_lock is going to be released, so force
5226 * fgd->hash to be reinitialized when it is taken again.
5227 */
5228 fgd->hash = NULL;
5229
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005230 return ret;
5231}
5232
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005233static int
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005234ftrace_graph_open(struct inode *inode, struct file *file)
5235{
5236 struct ftrace_graph_data *fgd;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005237 int ret;
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005238
5239 if (unlikely(ftrace_disabled))
5240 return -ENODEV;
5241
5242 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
5243 if (fgd == NULL)
5244 return -ENOMEM;
5245
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005246 mutex_lock(&graph_lock);
5247
Steven Rostedt (VMware)649b9882017-02-02 20:16:29 -05005248 fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
5249 lockdep_is_held(&graph_lock));
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005250 fgd->type = GRAPH_FILTER_FUNCTION;
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005251 fgd->seq_ops = &ftrace_graph_seq_ops;
5252
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005253 ret = __ftrace_graph_open(inode, file, fgd);
5254 if (ret < 0)
5255 kfree(fgd);
5256
5257 mutex_unlock(&graph_lock);
5258 return ret;
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005259}
5260
5261static int
Namhyung Kim29ad23b2013-10-14 17:24:26 +09005262ftrace_graph_notrace_open(struct inode *inode, struct file *file)
5263{
5264 struct ftrace_graph_data *fgd;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005265 int ret;
Namhyung Kim29ad23b2013-10-14 17:24:26 +09005266
5267 if (unlikely(ftrace_disabled))
5268 return -ENODEV;
5269
5270 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
5271 if (fgd == NULL)
5272 return -ENOMEM;
5273
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005274 mutex_lock(&graph_lock);
5275
Steven Rostedt (VMware)649b9882017-02-02 20:16:29 -05005276 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5277 lockdep_is_held(&graph_lock));
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005278 fgd->type = GRAPH_FILTER_NOTRACE;
Namhyung Kim29ad23b2013-10-14 17:24:26 +09005279 fgd->seq_ops = &ftrace_graph_seq_ops;
5280
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005281 ret = __ftrace_graph_open(inode, file, fgd);
5282 if (ret < 0)
5283 kfree(fgd);
5284
5285 mutex_unlock(&graph_lock);
5286 return ret;
Namhyung Kim29ad23b2013-10-14 17:24:26 +09005287}
5288
5289static int
Li Zefan87827112009-07-23 11:29:11 +08005290ftrace_graph_release(struct inode *inode, struct file *file)
5291{
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005292 struct ftrace_graph_data *fgd;
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005293 struct ftrace_hash *old_hash, *new_hash;
5294 struct trace_parser *parser;
5295 int ret = 0;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005296
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005297 if (file->f_mode & FMODE_READ) {
5298 struct seq_file *m = file->private_data;
5299
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005300 fgd = m->private;
Li Zefan87827112009-07-23 11:29:11 +08005301 seq_release(inode, file);
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005302 } else {
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005303 fgd = file->private_data;
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005304 }
5305
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005306
5307 if (file->f_mode & FMODE_WRITE) {
5308
5309 parser = &fgd->parser;
5310
5311		if (trace_parser_loaded(parser)) {
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005312 ret = ftrace_graph_set_hash(fgd->new_hash,
5313 parser->buffer);
5314 }
5315
5316 trace_parser_put(parser);
5317
5318 new_hash = __ftrace_hash_move(fgd->new_hash);
5319 if (!new_hash) {
5320 ret = -ENOMEM;
5321 goto out;
5322 }
5323
5324 mutex_lock(&graph_lock);
5325
5326 if (fgd->type == GRAPH_FILTER_FUNCTION) {
5327 old_hash = rcu_dereference_protected(ftrace_graph_hash,
5328 lockdep_is_held(&graph_lock));
5329 rcu_assign_pointer(ftrace_graph_hash, new_hash);
5330 } else {
5331 old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5332 lockdep_is_held(&graph_lock));
5333 rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
5334 }
5335
5336 mutex_unlock(&graph_lock);
5337
5338 /* Wait till all users are no longer using the old hash */
Paul E. McKenney74401722018-11-06 18:44:52 -08005339 synchronize_rcu();
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005340
5341 free_ftrace_hash(old_hash);
5342 }
5343
5344 out:
Luis Henriquesf9797c22017-05-25 16:20:38 +01005345 free_ftrace_hash(fgd->new_hash);
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005346 kfree(fgd);
5347
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005348 return ret;
Li Zefan87827112009-07-23 11:29:11 +08005349}
5350
5351static int
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005352ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005353{
Dmitry Safonov3ba00922015-09-29 19:46:14 +03005354 struct ftrace_glob func_g;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005355 struct dyn_ftrace *rec;
5356 struct ftrace_page *pg;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005357 struct ftrace_func_entry *entry;
Li Zefanc7c6b1f2010-02-10 15:43:04 +08005358 int fail = 1;
Dmitry Safonov3ba00922015-09-29 19:46:14 +03005359 int not;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005360
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01005361 /* decode regex */
Dmitry Safonov3ba00922015-09-29 19:46:14 +03005362 func_g.type = filter_parse_regex(buffer, strlen(buffer),
5363 &func_g.search, &not);
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01005364
Dmitry Safonov3ba00922015-09-29 19:46:14 +03005365 func_g.len = strlen(func_g.search);
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01005366
Steven Rostedt52baf112009-02-14 01:15:39 -05005367 mutex_lock(&ftrace_lock);
Steven Rostedt45a4a232011-04-21 23:16:46 -04005368
5369 if (unlikely(ftrace_disabled)) {
5370 mutex_unlock(&ftrace_lock);
5371 return -ENODEV;
5372 }
5373
Steven Rostedt265c8312009-02-13 12:43:56 -05005374 do_for_each_ftrace_rec(pg, rec) {
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005375
Steven Rostedt (Red Hat)546fece2016-11-14 16:31:49 -05005376 if (rec->flags & FTRACE_FL_DISABLED)
5377 continue;
5378
Dmitry Safonov0b507e12015-09-29 19:46:15 +03005379 if (ftrace_match_record(rec, &func_g, NULL, 0)) {
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005380 entry = ftrace_lookup_ip(hash, rec->ip);
Li Zefanc7c6b1f2010-02-10 15:43:04 +08005381
5382 if (!not) {
5383 fail = 0;
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005384
5385 if (entry)
5386 continue;
5387 if (add_hash_entry(hash, rec->ip) < 0)
5388 goto out;
Li Zefanc7c6b1f2010-02-10 15:43:04 +08005389 } else {
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005390 if (entry) {
5391 free_hash_entry(hash, entry);
Li Zefanc7c6b1f2010-02-10 15:43:04 +08005392 fail = 0;
5393 }
5394 }
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005395 }
Steven Rostedt265c8312009-02-13 12:43:56 -05005396 } while_for_each_ftrace_rec();
Li Zefanc7c6b1f2010-02-10 15:43:04 +08005397out:
Steven Rostedt52baf112009-02-14 01:15:39 -05005398 mutex_unlock(&ftrace_lock);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005399
Li Zefanc7c6b1f2010-02-10 15:43:04 +08005400 if (fail)
5401 return -EINVAL;
5402
Li Zefanc7c6b1f2010-02-10 15:43:04 +08005403 return 0;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005404}
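/*
 * Illustrative example (not taken from this file): writing "!schedule*"
 * to set_graph_function lands here with buffer "!schedule*".
 * filter_parse_regex() strips the leading '!' (setting "not") and
 * classifies the glob, so matching records are then removed from the
 * hash instead of added to it.
 */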
5405
5406static ssize_t
5407ftrace_graph_write(struct file *file, const char __user *ubuf,
5408 size_t cnt, loff_t *ppos)
5409{
Namhyung Kim6a101082013-10-14 17:24:25 +09005410 ssize_t read, ret = 0;
Namhyung Kimfaf982a2013-10-14 17:24:24 +09005411 struct ftrace_graph_data *fgd = file->private_data;
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005412 struct trace_parser *parser;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005413
Li Zefanc7c6b1f2010-02-10 15:43:04 +08005414 if (!cnt)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005415 return 0;
5416
Steven Rostedt (VMware)ae98d272017-02-02 16:59:06 -05005417 /* Read mode uses seq functions */
5418 if (file->f_mode & FMODE_READ) {
5419 struct seq_file *m = file->private_data;
5420 fgd = m->private;
5421 }
5422
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005423 parser = &fgd->parser;
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02005424
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005425 read = trace_get_user(parser, ubuf, cnt, ppos);
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02005426
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005427 if (read >= 0 && trace_parser_loaded(parser) &&
5428 !trace_parser_cont(parser)) {
Namhyung Kim6a101082013-10-14 17:24:25 +09005429
Namhyung Kimb9b0c8312017-01-20 11:44:47 +09005430 ret = ftrace_graph_set_hash(fgd->new_hash,
Steven Rostedt (VMware)e704eff2017-02-02 20:34:37 -05005431 parser->buffer);
5432 trace_parser_clear(parser);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005433 }
5434
Namhyung Kim6a101082013-10-14 17:24:25 +09005435 if (!ret)
5436 ret = read;
Li Zefan1eb90f12009-09-22 13:52:57 +08005437
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005438 return ret;
5439}
5440
5441static const struct file_operations ftrace_graph_fops = {
Li Zefan87827112009-07-23 11:29:11 +08005442 .open = ftrace_graph_open,
5443 .read = seq_read,
5444 .write = ftrace_graph_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005445 .llseek = tracing_lseek,
Li Zefan87827112009-07-23 11:29:11 +08005446 .release = ftrace_graph_release,
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005447};
Namhyung Kim29ad23b2013-10-14 17:24:26 +09005448
5449static const struct file_operations ftrace_graph_notrace_fops = {
5450 .open = ftrace_graph_notrace_open,
5451 .read = seq_read,
5452 .write = ftrace_graph_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005453 .llseek = tracing_lseek,
Namhyung Kim29ad23b2013-10-14 17:24:26 +09005454 .release = ftrace_graph_release,
5455};
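/*
 * Typical tracefs usage of the two files above (illustrative shell
 * session; paths are relative to the tracefs mount point):
 *
 *   echo schedule    > set_graph_function   # graph only schedule()
 *   echo 'vmalloc*' >> set_graph_function   # append a glob pattern
 *   echo             > set_graph_function   # O_TRUNC: trace everything again
 *   echo kfree       > set_graph_notrace    # exclude kfree() from the graph
 */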
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005456#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5457
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05005458void ftrace_create_filter_files(struct ftrace_ops *ops,
5459 struct dentry *parent)
5460{
5461
5462 trace_create_file("set_ftrace_filter", 0644, parent,
5463 ops, &ftrace_filter_fops);
5464
5465 trace_create_file("set_ftrace_notrace", 0644, parent,
5466 ops, &ftrace_notrace_fops);
5467}
5468
5469/*
5470 * The name "destroy_filter_files" is really a misnomer. Although
Hariprasad Kelam9efb85c2019-03-24 00:05:23 +05305471 * it may actually delete the files in the future, for now it is
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05005472 * really intended to make sure the ops passed in are disabled
5473 * and that when this function returns, the caller is free to
5474 * free the ops.
5475 *
5476 * The "destroy" name is only to match the "create" name that this
5477 * should be paired with.
5478 */
5479void ftrace_destroy_filter_files(struct ftrace_ops *ops)
5480{
5481 mutex_lock(&ftrace_lock);
5482 if (ops->flags & FTRACE_OPS_FL_ENABLED)
5483 ftrace_shutdown(ops, 0);
5484 ops->flags |= FTRACE_OPS_FL_DELETED;
Steven Rostedt (VMware)2840f842018-12-10 23:58:01 -05005485 ftrace_free_filter(ops);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05005486 mutex_unlock(&ftrace_lock);
5487}
5488
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05005489static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
Steven Rostedt5072c592008-05-12 21:20:43 +02005490{
Steven Rostedt5072c592008-05-12 21:20:43 +02005491
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005492 trace_create_file("available_filter_functions", 0444,
5493 d_tracer, NULL, &ftrace_avail_fops);
Steven Rostedt5072c592008-05-12 21:20:43 +02005494
Steven Rostedt647bcd02011-05-03 14:39:21 -04005495 trace_create_file("enabled_functions", 0444,
5496 d_tracer, NULL, &ftrace_enabled_fops);
5497
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05005498 ftrace_create_filter_files(&global_ops, d_tracer);
Steven Rostedtad90c0e2008-05-27 20:48:37 -04005499
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005500#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Chen LinX1ce05002014-09-03 14:31:09 +08005501 trace_create_file("set_graph_function", 0644, d_tracer,
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005502 NULL,
5503 &ftrace_graph_fops);
Chen LinX1ce05002014-09-03 14:31:09 +08005504 trace_create_file("set_graph_notrace", 0644, d_tracer,
Namhyung Kim29ad23b2013-10-14 17:24:26 +09005505 NULL,
5506 &ftrace_graph_notrace_fops);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05005507#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5508
Steven Rostedt5072c592008-05-12 21:20:43 +02005509 return 0;
5510}
5511
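/*
 * The comparison below is done explicitly rather than by returning
 * (*ipa - *ipb): the ips are unsigned longs, so a difference truncated
 * to int could report the wrong sign.
 */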
Steven Rostedt9fd49322012-04-24 22:32:06 -04005512static int ftrace_cmp_ips(const void *a, const void *b)
Steven Rostedt68950612011-12-16 17:06:45 -05005513{
Steven Rostedt9fd49322012-04-24 22:32:06 -04005514 const unsigned long *ipa = a;
5515 const unsigned long *ipb = b;
Steven Rostedt68950612011-12-16 17:06:45 -05005516
Steven Rostedt9fd49322012-04-24 22:32:06 -04005517 if (*ipa > *ipb)
5518 return 1;
5519 if (*ipa < *ipb)
5520 return -1;
5521 return 0;
5522}
5523
Jiri Olsa5cb084b2009-10-13 16:33:53 -04005524static int ftrace_process_locs(struct module *mod,
Steven Rostedt31e88902008-11-14 16:21:19 -08005525 unsigned long *start,
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005526 unsigned long *end)
5527{
Steven Rostedt706c81f2012-04-24 23:45:26 -04005528 struct ftrace_page *start_pg;
Steven Rostedta7900872011-12-16 16:23:44 -05005529 struct ftrace_page *pg;
Steven Rostedt706c81f2012-04-24 23:45:26 -04005530 struct dyn_ftrace *rec;
Steven Rostedta7900872011-12-16 16:23:44 -05005531 unsigned long count;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005532 unsigned long *p;
5533 unsigned long addr;
Steven Rostedt4376cac2011-06-24 23:28:13 -04005534 unsigned long flags = 0; /* Shut up gcc */
Steven Rostedta7900872011-12-16 16:23:44 -05005535 int ret = -ENOMEM;
5536
5537 count = end - start;
5538
5539 if (!count)
5540 return 0;
5541
Steven Rostedt9fd49322012-04-24 22:32:06 -04005542 sort(start, count, sizeof(*start),
Rasmus Villemoes6db02902015-09-09 23:27:02 +02005543 ftrace_cmp_ips, NULL);
Steven Rostedt9fd49322012-04-24 22:32:06 -04005544
Steven Rostedt706c81f2012-04-24 23:45:26 -04005545 start_pg = ftrace_allocate_pages(count);
5546 if (!start_pg)
Steven Rostedta7900872011-12-16 16:23:44 -05005547 return -ENOMEM;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005548
Steven Rostedte6ea44e2009-02-14 01:42:44 -05005549 mutex_lock(&ftrace_lock);
Steven Rostedta7900872011-12-16 16:23:44 -05005550
Steven Rostedt320823092011-12-16 14:42:37 -05005551 /*
5552	 * Core and each module need their own pages, as
5553 * modules will free them when they are removed.
5554 * Force a new page to be allocated for modules.
5555 */
Steven Rostedta7900872011-12-16 16:23:44 -05005556 if (!mod) {
5557 WARN_ON(ftrace_pages || ftrace_pages_start);
5558 /* First initialization */
Steven Rostedt706c81f2012-04-24 23:45:26 -04005559 ftrace_pages = ftrace_pages_start = start_pg;
Steven Rostedta7900872011-12-16 16:23:44 -05005560 } else {
Steven Rostedt320823092011-12-16 14:42:37 -05005561 if (!ftrace_pages)
Steven Rostedta7900872011-12-16 16:23:44 -05005562 goto out;
Steven Rostedt320823092011-12-16 14:42:37 -05005563
Steven Rostedta7900872011-12-16 16:23:44 -05005564 if (WARN_ON(ftrace_pages->next)) {
5565 /* Hmm, we have free pages? */
5566 while (ftrace_pages->next)
5567 ftrace_pages = ftrace_pages->next;
Steven Rostedt320823092011-12-16 14:42:37 -05005568 }
Steven Rostedta7900872011-12-16 16:23:44 -05005569
Steven Rostedt706c81f2012-04-24 23:45:26 -04005570 ftrace_pages->next = start_pg;
Steven Rostedt320823092011-12-16 14:42:37 -05005571 }
5572
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005573 p = start;
Steven Rostedt706c81f2012-04-24 23:45:26 -04005574 pg = start_pg;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005575 while (p < end) {
5576 addr = ftrace_call_adjust(*p++);
Steven Rostedt20e52272008-11-14 16:21:19 -08005577 /*
5578 * Some architecture linkers will pad between
5579 * the different mcount_loc sections of different
5580 * object files to satisfy alignments.
5581 * Skip any NULL pointers.
5582 */
5583 if (!addr)
5584 continue;
Steven Rostedt706c81f2012-04-24 23:45:26 -04005585
5586 if (pg->index == pg->size) {
5587 /* We should have allocated enough */
5588 if (WARN_ON(!pg->next))
5589 break;
5590 pg = pg->next;
5591 }
5592
5593 rec = &pg->records[pg->index++];
5594 rec->ip = addr;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005595 }
5596
Steven Rostedt706c81f2012-04-24 23:45:26 -04005597 /* We should have used all pages */
5598 WARN_ON(pg->next);
5599
5600 /* Assign the last page to ftrace_pages */
5601 ftrace_pages = pg;
5602
Steven Rostedta4f18ed2011-06-07 09:26:46 -04005603 /*
Steven Rostedt4376cac2011-06-24 23:28:13 -04005604 * We only need to disable interrupts on start up
5605 * because we are modifying code that an interrupt
5606 * may execute, and the modification is not atomic.
5607 * But for modules, nothing runs the code we modify
5608 * until we are finished with it, and there's no
5609 * reason to cause large interrupt latencies while we do it.
Steven Rostedta4f18ed2011-06-07 09:26:46 -04005610 */
Steven Rostedt4376cac2011-06-24 23:28:13 -04005611 if (!mod)
5612 local_irq_save(flags);
Jiri Slaby1dc43cf2014-02-24 19:59:56 +01005613 ftrace_update_code(mod, start_pg);
Steven Rostedt4376cac2011-06-24 23:28:13 -04005614 if (!mod)
5615 local_irq_restore(flags);
Steven Rostedta7900872011-12-16 16:23:44 -05005616 ret = 0;
5617 out:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05005618 mutex_unlock(&ftrace_lock);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005619
Steven Rostedta7900872011-12-16 16:23:44 -05005620 return ret;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005621}
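/*
 * For reference, the two callers of ftrace_process_locs() in this file:
 * boot passes the kernel's own __start_mcount_loc/__stop_mcount_loc pair
 * (see ftrace_init() below), and module load passes the equivalent
 * per-module table:
 *
 *   ftrace_process_locs(mod, mod->ftrace_callsites,
 *                       mod->ftrace_callsites + mod->num_ftrace_callsites);
 */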
5622
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04005623struct ftrace_mod_func {
5624 struct list_head list;
5625 char *name;
5626 unsigned long ip;
5627 unsigned int size;
5628};
5629
5630struct ftrace_mod_map {
Steven Rostedt (VMware)6aa69782017-09-05 19:20:16 -04005631 struct rcu_head rcu;
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04005632 struct list_head list;
5633 struct module *mod;
5634 unsigned long start_addr;
5635 unsigned long end_addr;
5636 struct list_head funcs;
Steven Rostedt (VMware)6171a032017-09-06 08:40:41 -04005637 unsigned int num_funcs;
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04005638};
5639
Steven Rostedt93eb6772009-04-15 13:24:06 -04005640#ifdef CONFIG_MODULES
Steven Rostedt320823092011-12-16 14:42:37 -05005641
5642#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
5643
Steven Rostedt (VMware)6aa69782017-09-05 19:20:16 -04005644static LIST_HEAD(ftrace_mod_maps);
5645
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05005646static int referenced_filters(struct dyn_ftrace *rec)
5647{
5648 struct ftrace_ops *ops;
5649 int cnt = 0;
5650
5651 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
5652 if (ops_references_rec(ops, rec))
5653 cnt++;
5654 }
5655
5656 return cnt;
5657}
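/*
 * referenced_filters() is used when a module is loaded: for each of the
 * module's records, ftrace_module_enable() below counts how many
 * currently registered ops would trace it, and uses that count to set
 * the record's reference count up front (clearing FTRACE_FL_DISABLED).
 */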
5658
Steven Rostedt (VMware)2a5bfe42017-08-31 17:36:51 -04005659static void
5660clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
5661{
5662 struct ftrace_func_entry *entry;
5663 struct dyn_ftrace *rec;
5664 int i;
5665
5666 if (ftrace_hash_empty(hash))
5667 return;
5668
5669 for (i = 0; i < pg->index; i++) {
5670 rec = &pg->records[i];
5671 entry = __ftrace_lookup_ip(hash, rec->ip);
5672 /*
5673 * Do not allow this rec to match again.
5674 * Yeah, it may waste some memory, but will be removed
5675 * if/when the hash is modified again.
5676 */
5677 if (entry)
5678 entry->ip = 0;
5679 }
5680}
5681
5682/* Clear any records from hashes */
5683static void clear_mod_from_hashes(struct ftrace_page *pg)
5684{
5685 struct trace_array *tr;
5686
5687 mutex_lock(&trace_types_lock);
5688 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
5689 if (!tr->ops || !tr->ops->func_hash)
5690 continue;
5691 mutex_lock(&tr->ops->func_hash->regex_lock);
5692 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
5693 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
5694 mutex_unlock(&tr->ops->func_hash->regex_lock);
5695 }
5696 mutex_unlock(&trace_types_lock);
5697}
5698
Steven Rostedt (VMware)6aa69782017-09-05 19:20:16 -04005699static void ftrace_free_mod_map(struct rcu_head *rcu)
5700{
5701 struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
5702 struct ftrace_mod_func *mod_func;
5703 struct ftrace_mod_func *n;
5704
5705	/* The contents of mod_map are no longer visible to any readers */
5706 list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
5707 kfree(mod_func->name);
5708 list_del(&mod_func->list);
5709 kfree(mod_func);
5710 }
5711
5712 kfree(mod_map);
5713}
5714
jolsa@redhat.come7247a12009-10-07 19:00:35 +02005715void ftrace_release_mod(struct module *mod)
Steven Rostedt93eb6772009-04-15 13:24:06 -04005716{
Steven Rostedt (VMware)6aa69782017-09-05 19:20:16 -04005717 struct ftrace_mod_map *mod_map;
5718 struct ftrace_mod_map *n;
Steven Rostedt93eb6772009-04-15 13:24:06 -04005719 struct dyn_ftrace *rec;
Steven Rostedt320823092011-12-16 14:42:37 -05005720 struct ftrace_page **last_pg;
Steven Rostedt (VMware)2a5bfe42017-08-31 17:36:51 -04005721 struct ftrace_page *tmp_page = NULL;
Steven Rostedt93eb6772009-04-15 13:24:06 -04005722 struct ftrace_page *pg;
Steven Rostedta7900872011-12-16 16:23:44 -05005723 int order;
Steven Rostedt93eb6772009-04-15 13:24:06 -04005724
Steven Rostedt93eb6772009-04-15 13:24:06 -04005725 mutex_lock(&ftrace_lock);
Steven Rostedt45a4a232011-04-21 23:16:46 -04005726
5727 if (ftrace_disabled)
5728 goto out_unlock;
5729
Steven Rostedt (VMware)6aa69782017-09-05 19:20:16 -04005730 list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
5731 if (mod_map->mod == mod) {
5732 list_del_rcu(&mod_map->list);
Paul E. McKenney74401722018-11-06 18:44:52 -08005733 call_rcu(&mod_map->rcu, ftrace_free_mod_map);
Steven Rostedt (VMware)6aa69782017-09-05 19:20:16 -04005734 break;
5735 }
5736 }
5737
Steven Rostedt320823092011-12-16 14:42:37 -05005738 /*
5739 * Each module has its own ftrace_pages, remove
5740 * them from the list.
5741 */
5742 last_pg = &ftrace_pages_start;
5743 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
5744 rec = &pg->records[0];
Steven Rostedt (VMware)3e234282017-03-03 18:00:22 -05005745 if (within_module_core(rec->ip, mod) ||
5746 within_module_init(rec->ip, mod)) {
Steven Rostedt93eb6772009-04-15 13:24:06 -04005747 /*
Steven Rostedt320823092011-12-16 14:42:37 -05005748 * As core pages are first, the first
5749 * page should never be a module page.
Steven Rostedt93eb6772009-04-15 13:24:06 -04005750 */
Steven Rostedt320823092011-12-16 14:42:37 -05005751 if (WARN_ON(pg == ftrace_pages_start))
5752 goto out_unlock;
5753
5754 /* Check if we are deleting the last page */
5755 if (pg == ftrace_pages)
5756 ftrace_pages = next_to_ftrace_page(last_pg);
5757
Steven Rostedt (VMware)83dd1492017-06-27 11:04:40 -04005758 ftrace_update_tot_cnt -= pg->index;
Steven Rostedt320823092011-12-16 14:42:37 -05005759 *last_pg = pg->next;
Steven Rostedt (VMware)2a5bfe42017-08-31 17:36:51 -04005760
5761 pg->next = tmp_page;
5762 tmp_page = pg;
Steven Rostedt320823092011-12-16 14:42:37 -05005763 } else
5764 last_pg = &pg->next;
5765 }
Steven Rostedt45a4a232011-04-21 23:16:46 -04005766 out_unlock:
Steven Rostedt93eb6772009-04-15 13:24:06 -04005767 mutex_unlock(&ftrace_lock);
Steven Rostedt (VMware)2a5bfe42017-08-31 17:36:51 -04005768
5769 for (pg = tmp_page; pg; pg = tmp_page) {
5770
5771 /* Needs to be called outside of ftrace_lock */
5772 clear_mod_from_hashes(pg);
5773
5774 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
5775 free_pages((unsigned long)pg->records, order);
5776 tmp_page = pg->next;
5777 kfree(pg);
5778 }
Steven Rostedt93eb6772009-04-15 13:24:06 -04005779}
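/*
 * Note the two-phase teardown above: pages are only unlinked while
 * ftrace_lock is held; clearing their records from the hashes and
 * freeing the pages happens afterwards, via the tmp_page list, because
 * clear_mod_from_hashes() must run outside of ftrace_lock.
 */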
5780
Jessica Yu7dcd1822016-02-16 17:32:33 -05005781void ftrace_module_enable(struct module *mod)
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05005782{
5783 struct dyn_ftrace *rec;
5784 struct ftrace_page *pg;
5785
5786 mutex_lock(&ftrace_lock);
Josh Poimboeuf9f255b62019-06-13 20:07:22 -05005787 mutex_lock(&text_mutex);
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05005788
5789 if (ftrace_disabled)
5790 goto out_unlock;
5791
5792 /*
5793 * If the tracing is enabled, go ahead and enable the record.
5794 *
Hariprasad Kelam9efb85c2019-03-24 00:05:23 +05305795 * The reason not to enable the record immediately is the
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05005796 * inherent check of ftrace_make_nop/ftrace_make_call for
5797	 * correct previous instructions. Doing the NOP
5798	 * conversion first puts the module into the correct state,
5799	 * thus passing the ftrace_make_call check.
5800 *
5801 * We also delay this to after the module code already set the
5802 * text to read-only, as we now need to set it back to read-write
5803 * so that we can modify the text.
5804 */
5805 if (ftrace_start_up)
5806 ftrace_arch_code_modify_prepare();
5807
5808 do_for_each_ftrace_rec(pg, rec) {
5809 int cnt;
5810 /*
5811 * do_for_each_ftrace_rec() is a double loop.
5812		 * Module text shares the pg. If a record is
5813		 * not part of this module, then skip the rest of
5814		 * this pg, which is what the "break" does.
5815 */
Steven Rostedt (VMware)3e234282017-03-03 18:00:22 -05005816 if (!within_module_core(rec->ip, mod) &&
5817 !within_module_init(rec->ip, mod))
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05005818 break;
5819
5820 cnt = 0;
5821
5822 /*
5823 * When adding a module, we need to check if tracers are
5824 * currently enabled and if they are, and can trace this record,
5825 * we need to enable the module functions as well as update the
5826 * reference counts for those function records.
5827 */
5828 if (ftrace_start_up)
5829 cnt += referenced_filters(rec);
5830
5831 /* This clears FTRACE_FL_DISABLED */
5832 rec->flags = cnt;
5833
5834 if (ftrace_start_up && cnt) {
5835 int failed = __ftrace_replace_code(rec, 1);
5836 if (failed) {
5837 ftrace_bug(failed, rec);
5838 goto out_loop;
5839 }
5840 }
5841
5842 } while_for_each_ftrace_rec();
5843
5844 out_loop:
5845 if (ftrace_start_up)
5846 ftrace_arch_code_modify_post_process();
5847
5848 out_unlock:
Josh Poimboeuf9f255b62019-06-13 20:07:22 -05005849 mutex_unlock(&text_mutex);
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05005850 mutex_unlock(&ftrace_lock);
Steven Rostedt (VMware)d7fbf8d2017-06-26 10:57:21 -04005851
5852 process_cached_mods(mod->name);
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05005853}
5854
Steven Rostedt (Red Hat)a949ae52014-04-24 10:40:12 -04005855void ftrace_module_init(struct module *mod)
Steven Rostedt93eb6772009-04-15 13:24:06 -04005856{
Steven Rostedt (Red Hat)97e9b4f2015-12-23 12:12:22 -05005857 if (ftrace_disabled || !mod->num_ftrace_callsites)
Abel Vesab6b71f62015-12-02 15:39:57 +01005858 return;
5859
Steven Rostedt (Red Hat)97e9b4f2015-12-23 12:12:22 -05005860 ftrace_process_locs(mod, mod->ftrace_callsites,
5861 mod->ftrace_callsites + mod->num_ftrace_callsites);
Steven Rostedt (Red Hat)8c189ea2013-02-13 15:18:38 -05005862}
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04005863
5864static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
5865 struct dyn_ftrace *rec)
5866{
5867 struct ftrace_mod_func *mod_func;
5868 unsigned long symsize;
5869 unsigned long offset;
5870 char str[KSYM_SYMBOL_LEN];
5871 char *modname;
5872 const char *ret;
5873
5874 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
5875 if (!ret)
5876 return;
5877
5878 mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
5879 if (!mod_func)
5880 return;
5881
5882 mod_func->name = kstrdup(str, GFP_KERNEL);
5883 if (!mod_func->name) {
5884 kfree(mod_func);
5885 return;
5886 }
5887
5888 mod_func->ip = rec->ip - offset;
5889 mod_func->size = symsize;
5890
Steven Rostedt (VMware)6171a032017-09-06 08:40:41 -04005891 mod_map->num_funcs++;
5892
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04005893 list_add_rcu(&mod_func->list, &mod_map->funcs);
5894}
5895
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04005896static struct ftrace_mod_map *
5897allocate_ftrace_mod_map(struct module *mod,
5898 unsigned long start, unsigned long end)
5899{
5900 struct ftrace_mod_map *mod_map;
5901
5902 mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
5903 if (!mod_map)
5904 return NULL;
5905
5906 mod_map->mod = mod;
5907 mod_map->start_addr = start;
5908 mod_map->end_addr = end;
Steven Rostedt (VMware)6171a032017-09-06 08:40:41 -04005909 mod_map->num_funcs = 0;
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04005910
5911 INIT_LIST_HEAD_RCU(&mod_map->funcs);
5912
5913 list_add_rcu(&mod_map->list, &ftrace_mod_maps);
5914
5915 return mod_map;
5916}
5917
5918static const char *
5919ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
5920 unsigned long addr, unsigned long *size,
5921 unsigned long *off, char *sym)
5922{
5923 struct ftrace_mod_func *found_func = NULL;
5924 struct ftrace_mod_func *mod_func;
5925
5926 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
5927 if (addr >= mod_func->ip &&
5928 addr < mod_func->ip + mod_func->size) {
5929 found_func = mod_func;
5930 break;
5931 }
5932 }
5933
5934 if (found_func) {
5935 if (size)
5936 *size = found_func->size;
5937 if (off)
5938 *off = addr - found_func->ip;
5939 if (sym)
5940 strlcpy(sym, found_func->name, KSYM_NAME_LEN);
5941
5942 return found_func->name;
5943 }
5944
5945 return NULL;
5946}
5947
5948const char *
5949ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
5950 unsigned long *off, char **modname, char *sym)
5951{
5952 struct ftrace_mod_map *mod_map;
5953 const char *ret = NULL;
5954
Paul E. McKenney74401722018-11-06 18:44:52 -08005955 /* mod_map is freed via call_rcu() */
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04005956 preempt_disable();
5957 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
5958 ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
5959 if (ret) {
5960 if (modname)
5961 *modname = mod_map->mod->name;
5962 break;
5963 }
5964 }
5965 preempt_enable();
5966
5967 return ret;
5968}
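/*
 * This lookup is what keeps symbols of freed module init text
 * resolvable: the ftrace_mod_map saved via save_ftrace_mod_rec()
 * outlives the init sections, so an address in a module's __init code
 * can still be turned into "name [module]" after that memory is gone.
 */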
5969
Steven Rostedt (VMware)6171a032017-09-06 08:40:41 -04005970int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
5971 char *type, char *name,
5972 char *module_name, int *exported)
5973{
5974 struct ftrace_mod_map *mod_map;
5975 struct ftrace_mod_func *mod_func;
5976
5977 preempt_disable();
5978 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
5979
5980 if (symnum >= mod_map->num_funcs) {
5981 symnum -= mod_map->num_funcs;
5982 continue;
5983 }
5984
5985 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
5986 if (symnum > 1) {
5987 symnum--;
5988 continue;
5989 }
5990
5991 *value = mod_func->ip;
5992 *type = 'T';
5993 strlcpy(name, mod_func->name, KSYM_NAME_LEN);
5994 strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
5995 *exported = 1;
5996 preempt_enable();
5997 return 0;
5998 }
5999 WARN_ON(1);
6000 break;
6001 }
6002 preempt_enable();
6003 return -ERANGE;
6004}
6005
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04006006#else
6007static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
6008 struct dyn_ftrace *rec) { }
6009static inline struct ftrace_mod_map *
6010allocate_ftrace_mod_map(struct module *mod,
6011 unsigned long start, unsigned long end)
6012{
6013 return NULL;
6014}
Steven Rostedt93eb6772009-04-15 13:24:06 -04006015#endif /* CONFIG_MODULES */
6016
Joel Fernandes8715b102017-10-09 12:29:31 -07006017struct ftrace_init_func {
6018 struct list_head list;
6019 unsigned long ip;
6020};
6021
6022/* Clear any init ips from hashes */
6023static void
6024clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
Steven Rostedt (VMware)42c269c2017-03-03 16:15:39 -05006025{
Joel Fernandes8715b102017-10-09 12:29:31 -07006026 struct ftrace_func_entry *entry;
6027
6028 if (ftrace_hash_empty(hash))
6029 return;
6030
6031 entry = __ftrace_lookup_ip(hash, func->ip);
6032
6033 /*
6034 * Do not allow this rec to match again.
6035 * Yeah, it may waste some memory, but will be removed
6036 * if/when the hash is modified again.
6037 */
6038 if (entry)
6039 entry->ip = 0;
6040}
6041
6042static void
6043clear_func_from_hashes(struct ftrace_init_func *func)
6044{
6045 struct trace_array *tr;
6046
6047 mutex_lock(&trace_types_lock);
6048 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6049 if (!tr->ops || !tr->ops->func_hash)
6050 continue;
6051 mutex_lock(&tr->ops->func_hash->regex_lock);
6052 clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
6053 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
6054 mutex_unlock(&tr->ops->func_hash->regex_lock);
6055 }
6056 mutex_unlock(&trace_types_lock);
6057}
6058
6059static void add_to_clear_hash_list(struct list_head *clear_list,
6060 struct dyn_ftrace *rec)
6061{
6062 struct ftrace_init_func *func;
6063
6064 func = kmalloc(sizeof(*func), GFP_KERNEL);
6065 if (!func) {
6066 WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n");
6067 return;
6068 }
6069
6070 func->ip = rec->ip;
6071 list_add(&func->list, clear_list);
6072}
6073
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04006074void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
Steven Rostedt (VMware)42c269c2017-03-03 16:15:39 -05006075{
Steven Rostedt (VMware)6cafbe12017-06-20 10:44:58 -04006076 unsigned long start = (unsigned long)(start_ptr);
6077 unsigned long end = (unsigned long)(end_ptr);
Steven Rostedt (VMware)42c269c2017-03-03 16:15:39 -05006078 struct ftrace_page **last_pg = &ftrace_pages_start;
6079 struct ftrace_page *pg;
6080 struct dyn_ftrace *rec;
6081 struct dyn_ftrace key;
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04006082 struct ftrace_mod_map *mod_map = NULL;
Joel Fernandes8715b102017-10-09 12:29:31 -07006083 struct ftrace_init_func *func, *func_next;
6084 struct list_head clear_hash;
Steven Rostedt (VMware)42c269c2017-03-03 16:15:39 -05006085 int order;
6086
Joel Fernandes8715b102017-10-09 12:29:31 -07006087 INIT_LIST_HEAD(&clear_hash);
6088
Steven Rostedt (VMware)42c269c2017-03-03 16:15:39 -05006089 key.ip = start;
6090 key.flags = end; /* overload flags, as it is unsigned long */
6091
6092 mutex_lock(&ftrace_lock);
6093
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04006094 /*
6095 * If we are freeing module init memory, then check if
6096 * any tracer is active. If so, we need to save a mapping of
6097 * the module functions being freed with the address.
6098 */
6099 if (mod && ftrace_ops_list != &ftrace_list_end)
6100 mod_map = allocate_ftrace_mod_map(mod, start, end);
6101
Steven Rostedt (VMware)42c269c2017-03-03 16:15:39 -05006102 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
6103 if (end < pg->records[0].ip ||
6104 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
6105 continue;
6106 again:
6107 rec = bsearch(&key, pg->records, pg->index,
6108 sizeof(struct dyn_ftrace),
6109 ftrace_cmp_recs);
6110 if (!rec)
6111 continue;
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04006112
Joel Fernandes8715b102017-10-09 12:29:31 -07006113 /* rec will be cleared from hashes after ftrace_lock unlock */
6114 add_to_clear_hash_list(&clear_hash, rec);
6115
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04006116 if (mod_map)
6117 save_ftrace_mod_rec(mod_map, rec);
6118
Steven Rostedt (VMware)42c269c2017-03-03 16:15:39 -05006119 pg->index--;
Steven Rostedt (VMware)4ec78462017-06-28 11:57:03 -04006120 ftrace_update_tot_cnt--;
Steven Rostedt (VMware)42c269c2017-03-03 16:15:39 -05006121 if (!pg->index) {
6122 *last_pg = pg->next;
6123 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
6124 free_pages((unsigned long)pg->records, order);
6125 kfree(pg);
6126 pg = container_of(last_pg, struct ftrace_page, next);
6127 if (!(*last_pg))
6128 ftrace_pages = pg;
6129 continue;
6130 }
6131 memmove(rec, rec + 1,
6132 (pg->index - (rec - pg->records)) * sizeof(*rec));
6133 /* More than one function may be in this block */
6134 goto again;
6135 }
6136 mutex_unlock(&ftrace_lock);
Joel Fernandes8715b102017-10-09 12:29:31 -07006137
6138 list_for_each_entry_safe(func, func_next, &clear_hash, list) {
6139 clear_func_from_hashes(func);
6140 kfree(func);
6141 }
Steven Rostedt (VMware)42c269c2017-03-03 16:15:39 -05006142}
6143
Steven Rostedt (VMware)6cafbe12017-06-20 10:44:58 -04006144void __init ftrace_free_init_mem(void)
6145{
6146 void *start = (void *)(&__init_begin);
6147 void *end = (void *)(&__init_end);
6148
Steven Rostedt (VMware)aba4b5c2017-09-01 08:35:38 -04006149 ftrace_free_mem(NULL, start, end);
Steven Rostedt93eb6772009-04-15 13:24:06 -04006150}
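/*
 * Called late in boot, just before the kernel's own init memory is
 * released, so that records for __init functions are dropped instead of
 * being left pointing at freed text.
 */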
6151
Steven Rostedt68bf21a2008-08-14 15:45:08 -04006152void __init ftrace_init(void)
6153{
Jiri Slaby1dc43cf2014-02-24 19:59:56 +01006154 extern unsigned long __start_mcount_loc[];
6155 extern unsigned long __stop_mcount_loc[];
Jiri Slaby3a36cb12014-02-24 19:59:59 +01006156 unsigned long count, flags;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04006157 int ret;
6158
Steven Rostedt68bf21a2008-08-14 15:45:08 -04006159 local_irq_save(flags);
Jiri Slaby3a36cb12014-02-24 19:59:59 +01006160 ret = ftrace_dyn_arch_init();
Steven Rostedt68bf21a2008-08-14 15:45:08 -04006161 local_irq_restore(flags);
Jiri Slabyaf64a7c2014-02-24 19:59:58 +01006162 if (ret)
Steven Rostedt68bf21a2008-08-14 15:45:08 -04006163 goto failed;
6164
6165 count = __stop_mcount_loc - __start_mcount_loc;
Jiri Slabyc867ccd2014-02-24 19:59:57 +01006166 if (!count) {
6167 pr_info("ftrace: No functions to be traced?\n");
Steven Rostedt68bf21a2008-08-14 15:45:08 -04006168 goto failed;
Jiri Slabyc867ccd2014-02-24 19:59:57 +01006169 }
6170
6171 pr_info("ftrace: allocating %ld entries in %ld pages\n",
6172 count, count / ENTRIES_PER_PAGE + 1);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04006173
6174 last_ftrace_enabled = ftrace_enabled = 1;
6175
Jiri Olsa5cb084b2009-10-13 16:33:53 -04006176 ret = ftrace_process_locs(NULL,
Steven Rostedt31e88902008-11-14 16:21:19 -08006177 __start_mcount_loc,
Steven Rostedt68bf21a2008-08-14 15:45:08 -04006178 __stop_mcount_loc);
6179
Steven Rostedt2af15d62009-05-28 13:37:24 -04006180 set_ftrace_early_filters();
6181
Steven Rostedt68bf21a2008-08-14 15:45:08 -04006182 return;
6183 failed:
6184 ftrace_disabled = 1;
6185}
Steven Rostedt68bf21a2008-08-14 15:45:08 -04006186
Steven Rostedt (Red Hat)f3bea492014-07-02 23:23:31 -04006187/* Do nothing if arch does not support this */
6188void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
6189{
6190}
6191
6192static void ftrace_update_trampoline(struct ftrace_ops *ops)
6193{
Steven Rostedt (Red Hat)f3bea492014-07-02 23:23:31 -04006194 arch_ftrace_update_trampoline(ops);
6195}
6196
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04006197void ftrace_init_trace_array(struct trace_array *tr)
6198{
6199 INIT_LIST_HEAD(&tr->func_probes);
Steven Rostedt (VMware)673feb92017-06-23 15:26:26 -04006200 INIT_LIST_HEAD(&tr->mod_trace);
6201 INIT_LIST_HEAD(&tr->mod_notrace);
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04006202}
Steven Rostedt3d083392008-05-12 21:20:42 +02006203#else
Frederic Weisbecker0b6e4d52008-10-28 20:17:38 +01006204
Steven Rostedt (VMware)3306fc4a2018-11-15 12:32:38 -05006205struct ftrace_ops global_ops = {
Steven Rostedtbd69c302011-05-03 21:55:54 -04006206 .func = ftrace_stub,
Steven Rostedt (Red Hat)e3eea142015-07-24 10:38:12 -04006207 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
6208 FTRACE_OPS_FL_INITIALIZED |
6209 FTRACE_OPS_FL_PID,
Steven Rostedtbd69c302011-05-03 21:55:54 -04006210};
6211
Frederic Weisbecker0b6e4d52008-10-28 20:17:38 +01006212static int __init ftrace_nodyn_init(void)
6213{
6214 ftrace_enabled = 1;
6215 return 0;
6216}
Steven Rostedt6f415672012-10-05 12:13:07 -04006217core_initcall(ftrace_nodyn_init);
Frederic Weisbecker0b6e4d52008-10-28 20:17:38 +01006218
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006219static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
Steven Rostedtdf4fc312008-11-26 00:16:23 -05006220static inline void ftrace_startup_enable(int command) { }
Steven Rostedt (Red Hat)e1effa02014-08-05 17:19:38 -04006221static inline void ftrace_startup_all(int command) { }
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05006222
Ingo Molnarc7aafc52008-05-12 21:20:45 +02006223# define ftrace_startup_sysctl() do { } while (0)
6224# define ftrace_shutdown_sysctl() do { } while (0)
Steven Rostedtb8489142011-05-04 09:27:52 -04006225
Steven Rostedt (Red Hat)f3bea492014-07-02 23:23:31 -04006226static void ftrace_update_trampoline(struct ftrace_ops *ops)
6227{
6228}
6229
Steven Rostedt3d083392008-05-12 21:20:42 +02006230#endif /* CONFIG_DYNAMIC_FTRACE */
6231
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05006232__init void ftrace_init_global_array_ops(struct trace_array *tr)
6233{
6234 tr->ops = &global_ops;
6235 tr->ops->private = tr;
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04006236 ftrace_init_trace_array(tr);
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05006237}
6238
6239void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
6240{
6241 /* If we filter on pids, update to use the pid function */
6242 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
6243 if (WARN_ON(tr->ops->func != ftrace_stub))
6244 printk("ftrace ops had %pS for function\n",
6245 tr->ops->func);
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05006246 }
6247 tr->ops->func = func;
6248 tr->ops->private = tr;
6249}
6250
6251void ftrace_reset_array_ops(struct trace_array *tr)
6252{
6253 tr->ops->func = ftrace_stub;
6254}
6255
Masami Hiramatsufabe38a2019-02-24 01:50:20 +09006256static nokprobe_inline void
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04006257__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
Steven Rostedta1e2e312011-08-09 12:50:46 -04006258 struct ftrace_ops *ignored, struct pt_regs *regs)
Steven Rostedtb8489142011-05-04 09:27:52 -04006259{
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04006260 struct ftrace_ops *op;
Steven Rostedtedc15ca2012-11-02 17:47:21 -04006261 int bit;
Steven Rostedtb8489142011-05-04 09:27:52 -04006262
Steven Rostedtedc15ca2012-11-02 17:47:21 -04006263 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
6264 if (bit < 0)
6265 return;
Steven Rostedtc29f1222012-11-02 17:17:59 -04006266
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04006267 /*
6268	 * Some of the ops may be dynamically allocated;
Paul E. McKenney74401722018-11-06 18:44:52 -08006269 * they must be freed after a synchronize_rcu().
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04006270 */
6271 preempt_disable_notrace();
Steven Rostedt (Red Hat)ba27f2b2015-11-30 17:23:39 -05006272
Steven Rostedt0a016402012-11-02 17:03:03 -04006273 do_for_each_ftrace_op(op, ftrace_ops_list) {
Steven Rostedt (VMware)2fa717a2019-04-11 11:46:13 -04006274 /* Stub functions don't need to be called nor tested */
6275 if (op->flags & FTRACE_OPS_FL_STUB)
6276 continue;
Steven Rostedt (Red Hat)ba27f2b2015-11-30 17:23:39 -05006277 /*
6278 * Check the following for each ops before calling their func:
6279 * if RCU flag is set, then rcu_is_watching() must be true
6282 * Otherwise test if the ip matches the ops filter
6283 *
6284 * If any of the above fails then the op->func() is not executed.
6285 */
6286 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
Steven Rostedt (Red Hat)ba27f2b2015-11-30 17:23:39 -05006287 ftrace_ops_test(op, ip, regs)) {
Steven Rostedt (Red Hat)1d48d592014-06-25 11:54:03 -04006288 if (FTRACE_WARN_ON(!op->func)) {
6289 pr_warn("op=%p %pS\n", op, op);
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05006290 goto out;
6291 }
Steven Rostedta1e2e312011-08-09 12:50:46 -04006292 op->func(ip, parent_ip, op, regs);
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05006293 }
Steven Rostedt0a016402012-11-02 17:03:03 -04006294 } while_for_each_ftrace_op(op);
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05006295out:
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04006296 preempt_enable_notrace();
Steven Rostedtedc15ca2012-11-02 17:47:21 -04006297 trace_clear_recursion(bit);
Steven Rostedtb8489142011-05-04 09:27:52 -04006298}
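/*
 * Sketch of when the list function comes into play (hypothetical ops
 * and callback names, compiled out): once more than one ops traces the
 * same functions, the single trampoline call site cannot dispatch to
 * both directly, so it can end up pointing at ftrace_ops_list_func(),
 * which reaches the loop above and calls each matching op->func in turn.
 */
#if 0
static void cb_a(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *regs) { }
static void cb_b(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *regs) { }

static struct ftrace_ops ops_a = { .func = cb_a };
static struct ftrace_ops ops_b = { .func = cb_b };

static void example(void)
{
	register_ftrace_function(&ops_a);	/* can call cb_a directly */
	register_ftrace_function(&ops_b);	/* dispatch now goes via the list func */
}
#endif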
6299
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04006300/*
6301 * Some archs only support passing ip and parent_ip. Even though
6302 * the list function ignores the op parameter, we do not want any
6303 * C side effects, where a function is called without the caller
6304 * sending a third parameter.
Steven Rostedta1e2e312011-08-09 12:50:46 -04006305 * Archs are to support both the regs and ftrace_ops at the same time.
6306 * If they support ftrace_ops, it is assumed they support regs.
6307 * If call backs want to use regs, they must either check for regs
Masami Hiramatsu06aeaae2012-09-28 17:15:17 +09006308 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
6309 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
Steven Rostedta1e2e312011-08-09 12:50:46 -04006310 * An architecture can pass partial regs with ftrace_ops and still
Li Binb8ec3302015-11-30 18:23:36 +08006311 * set the ARCH_SUPPORTS_FTRACE_OPS.
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04006312 */
6313#if ARCH_SUPPORTS_FTRACE_OPS
6314static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
Steven Rostedta1e2e312011-08-09 12:50:46 -04006315 struct ftrace_ops *op, struct pt_regs *regs)
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04006316{
Steven Rostedta1e2e312011-08-09 12:50:46 -04006317 __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04006318}
Masami Hiramatsufabe38a2019-02-24 01:50:20 +09006319NOKPROBE_SYMBOL(ftrace_ops_list_func);
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04006320#else
6321static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
6322{
Steven Rostedta1e2e312011-08-09 12:50:46 -04006323 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04006324}
Masami Hiramatsufabe38a2019-02-24 01:50:20 +09006325NOKPROBE_SYMBOL(ftrace_ops_no_ops);
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04006326#endif
6327
Steven Rostedt (Red Hat)f1ff6342014-07-22 20:16:57 -04006328/*
6329 * If there's only one function registered but it does not support
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05006330 * recursion or needs RCU protection, then
6331 * this function will be called by the mcount trampoline.
Steven Rostedt (Red Hat)f1ff6342014-07-22 20:16:57 -04006332 */
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05006333static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (Red Hat)f1ff6342014-07-22 20:16:57 -04006334 struct ftrace_ops *op, struct pt_regs *regs)
6335{
6336 int bit;
6337
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05006338 if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
6339 return;
6340
Steven Rostedt (Red Hat)f1ff6342014-07-22 20:16:57 -04006341 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
6342 if (bit < 0)
6343 return;
6344
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05006345 preempt_disable_notrace();
Steven Rostedt (Red Hat)f1ff6342014-07-22 20:16:57 -04006346
Peter Zijlstrab3a88802017-10-11 09:45:32 +02006347 op->func(ip, parent_ip, op, regs);
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05006348
6349 preempt_enable_notrace();
Steven Rostedt (Red Hat)f1ff6342014-07-22 20:16:57 -04006350 trace_clear_recursion(bit);
6351}
Masami Hiramatsufabe38a2019-02-24 01:50:20 +09006352NOKPROBE_SYMBOL(ftrace_ops_assist_func);
Steven Rostedt (Red Hat)f1ff6342014-07-22 20:16:57 -04006353
Steven Rostedt (Red Hat)87354052014-07-22 20:41:42 -04006354/**
6355 * ftrace_ops_get_func - get the function a trampoline should call
6356 * @ops: the ops to get the function for
6357 *
6358 * Normally the mcount trampoline will call the ops->func, but there
6359 * are times that it should not. For example, if the ops does not
6360 * have its own recursion protection, then it should call the
Chunyu Hu3a150df2017-02-22 08:29:26 +08006361 * ftrace_ops_assist_func() instead.
Steven Rostedt (Red Hat)87354052014-07-22 20:41:42 -04006362 *
6363 * Returns the function that the trampoline should call for @ops.
6364 */
6365ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
6366{
6367 /*
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05006368	 * If the function does not handle recursion or needs to be RCU safe,
6369	 * then we need to call the assist handler.
Steven Rostedt (Red Hat)87354052014-07-22 20:41:42 -04006370 */
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05006371 if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
Peter Zijlstrab3a88802017-10-11 09:45:32 +02006372 ops->flags & FTRACE_OPS_FL_RCU)
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05006373 return ftrace_ops_assist_func;
Steven Rostedt (Red Hat)87354052014-07-22 20:41:42 -04006374
6375 return ops->func;
6376}
6377
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006378static void
6379ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
6380 struct task_struct *prev, struct task_struct *next)
Steven Rostedte32d8952008-12-04 00:26:41 -05006381{
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006382 struct trace_array *tr = data;
6383 struct trace_pid_list *pid_list;
6384
6385 pid_list = rcu_dereference_sched(tr->function_pids);
6386
6387 this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
6388 trace_ignore_this_task(pid_list, next));
6389}
6390
Namhyung Kim1e104862017-04-17 11:44:28 +09006391static void
6392ftrace_pid_follow_sched_process_fork(void *data,
6393 struct task_struct *self,
6394 struct task_struct *task)
6395{
6396 struct trace_pid_list *pid_list;
6397 struct trace_array *tr = data;
6398
6399 pid_list = rcu_dereference_sched(tr->function_pids);
6400 trace_filter_add_remove_task(pid_list, self, task);
6401}
6402
6403static void
6404ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
6405{
6406 struct trace_pid_list *pid_list;
6407 struct trace_array *tr = data;
6408
6409 pid_list = rcu_dereference_sched(tr->function_pids);
6410 trace_filter_add_remove_task(pid_list, NULL, task);
6411}
6412
6413void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
6414{
6415 if (enable) {
6416 register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
6417 tr);
6418 register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
6419 tr);
6420 } else {
6421 unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
6422 tr);
6423 unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
6424 tr);
6425 }
6426}
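/*
 * Illustrative tracefs usage: pid filtering combined with fork
 * following, e.g.
 *
 *   echo $$ > set_ftrace_pid            # trace only this shell...
 *   echo 1  > options/function-fork     # ...and the children it forks
 */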
6427
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006428static void clear_ftrace_pids(struct trace_array *tr)
6429{
6430 struct trace_pid_list *pid_list;
Steven Rostedte32d8952008-12-04 00:26:41 -05006431 int cpu;
6432
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006433 pid_list = rcu_dereference_protected(tr->function_pids,
6434 lockdep_is_held(&ftrace_lock));
6435 if (!pid_list)
6436 return;
6437
6438 unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
6439
6440 for_each_possible_cpu(cpu)
6441 per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;
6442
6443 rcu_assign_pointer(tr->function_pids, NULL);
6444
 6445 /* Wait until all users are done with the pid list before freeing it */
Paul E. McKenney74401722018-11-06 18:44:52 -08006446 synchronize_rcu();
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006447
6448 trace_free_pid_list(pid_list);
Steven Rostedte32d8952008-12-04 00:26:41 -05006449}
6450
Namhyung Kimd879d0b2017-04-17 11:44:27 +09006451void ftrace_clear_pids(struct trace_array *tr)
6452{
6453 mutex_lock(&ftrace_lock);
6454
6455 clear_ftrace_pids(tr);
6456
6457 mutex_unlock(&ftrace_lock);
6458}
6459
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006460static void ftrace_pid_reset(struct trace_array *tr)
Steven Rostedte32d8952008-12-04 00:26:41 -05006461{
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006462 mutex_lock(&ftrace_lock);
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006463 clear_ftrace_pids(tr);
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006464
6465 ftrace_update_pid_func();
Steven Rostedt (Red Hat)e1effa02014-08-05 17:19:38 -04006466 ftrace_startup_all(0);
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006467
6468 mutex_unlock(&ftrace_lock);
6469}
6470
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006471/* Greater than any max PID */
6472#define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1)
6473
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006474static void *fpid_start(struct seq_file *m, loff_t *pos)
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006475 __acquires(RCU)
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006476{
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006477 struct trace_pid_list *pid_list;
6478 struct trace_array *tr = m->private;
6479
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006480 mutex_lock(&ftrace_lock);
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006481 rcu_read_lock_sched();
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006482
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006483 pid_list = rcu_dereference_sched(tr->function_pids);
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006484
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006485 if (!pid_list)
6486 return !(*pos) ? FTRACE_NO_PIDS : NULL;
6487
6488 return trace_pid_start(pid_list, pos);
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006489}
6490
6491static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
6492{
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006493 struct trace_array *tr = m->private;
6494 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
6495
6496 if (v == FTRACE_NO_PIDS)
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006497 return NULL;
6498
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006499 return trace_pid_next(pid_list, v, pos);
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006500}
6501
6502static void fpid_stop(struct seq_file *m, void *p)
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006503 __releases(RCU)
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006504{
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006505 rcu_read_unlock_sched();
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006506 mutex_unlock(&ftrace_lock);
6507}
6508
6509static int fpid_show(struct seq_file *m, void *v)
6510{
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006511 if (v == FTRACE_NO_PIDS) {
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01006512 seq_puts(m, "no pid\n");
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006513 return 0;
6514 }
6515
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006516 return trace_pid_show(m, v);
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006517}
6518
6519static const struct seq_operations ftrace_pid_sops = {
6520 .start = fpid_start,
6521 .next = fpid_next,
6522 .stop = fpid_stop,
6523 .show = fpid_show,
6524};
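/*
 * Reading the file walks the iterator above. A read-side sketch from
 * user space, assuming the usual tracefs mount point:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[64];
 *		FILE *f = fopen("/sys/kernel/tracing/set_ftrace_pid", "r");
 *
 *		if (!f)
 *			return 1;
 *		// fpid_start()/fpid_next() step through the pid list; with
 *		// no filter installed, fpid_show() prints the literal
 *		// "no pid" for the FTRACE_NO_PIDS sentinel.
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 */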
6525
6526static int
6527ftrace_pid_open(struct inode *inode, struct file *file)
6528{
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006529 struct trace_array *tr = inode->i_private;
6530 struct seq_file *m;
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006531 int ret = 0;
6532
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006533 if (trace_array_get(tr) < 0)
6534 return -ENODEV;
6535
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006536 if ((file->f_mode & FMODE_WRITE) &&
6537 (file->f_flags & O_TRUNC))
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006538 ftrace_pid_reset(tr);
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006539
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006540 ret = seq_open(file, &ftrace_pid_sops);
6541 if (ret < 0) {
6542 trace_array_put(tr);
6543 } else {
6544 m = file->private_data;
6545 /* copy tr over to seq ops */
6546 m->private = tr;
6547 }
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006548
6549 return ret;
6550}
6551
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006552static void ignore_task_cpu(void *data)
6553{
6554 struct trace_array *tr = data;
6555 struct trace_pid_list *pid_list;
6556
6557 /*
 6558 * This function is called by on_each_cpu() while the
 6559 * ftrace_lock is held.
6560 */
6561 pid_list = rcu_dereference_protected(tr->function_pids,
6562 mutex_is_locked(&ftrace_lock));
6563
6564 this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
6565 trace_ignore_this_task(pid_list, current));
6566}
6567
Steven Rostedtdf4fc312008-11-26 00:16:23 -05006568static ssize_t
6569ftrace_pid_write(struct file *filp, const char __user *ubuf,
6570 size_t cnt, loff_t *ppos)
6571{
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006572 struct seq_file *m = filp->private_data;
6573 struct trace_array *tr = m->private;
6574 struct trace_pid_list *filtered_pids = NULL;
6575 struct trace_pid_list *pid_list;
6576 ssize_t ret;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05006577
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006578 if (!cnt)
6579 return 0;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05006580
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006581 mutex_lock(&ftrace_lock);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05006582
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006583 filtered_pids = rcu_dereference_protected(tr->function_pids,
6584 lockdep_is_held(&ftrace_lock));
6585
6586 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
6587 if (ret < 0)
6588 goto out;
6589
6590 rcu_assign_pointer(tr->function_pids, pid_list);
6591
6592 if (filtered_pids) {
Paul E. McKenney74401722018-11-06 18:44:52 -08006593 synchronize_rcu();
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006594 trace_free_pid_list(filtered_pids);
6595 } else if (pid_list) {
6596 /* Register a probe to set whether to ignore the tracing of a task */
6597 register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
6598 }
Steven Rostedtdf4fc312008-11-26 00:16:23 -05006599
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006600 /*
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006601 * Ignoring of pids is done at task switch, but we also have to
 6602 * update the per-cpu flag for tasks that are already running.
 6603 * Always do this, whether a pid was appended or removed.
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006604 */
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006605 on_each_cpu(ignore_task_cpu, tr, 1);
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006606
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006607 ftrace_update_pid_func();
6608 ftrace_startup_all(0);
6609 out:
6610 mutex_unlock(&ftrace_lock);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05006611
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006612 if (ret > 0)
6613 *ppos += ret;
Steven Rostedt978f3a42008-12-04 00:26:40 -05006614
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006615 return ret;
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006616}
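/*
 * The update above is the standard RCU replace-and-retire sequence; in
 * outline (names generic, not from this file):
 *
 *	new = build_new_list();
 *	rcu_assign_pointer(shared, new);	// publish the new list
 *	synchronize_rcu();			// wait out existing readers
 *	free_old_list(old);			// now safe to free
 *
 * Readers on the tracing path use rcu_dereference_sched() with
 * preemption disabled, which synchronize_rcu() also waits for now that
 * the RCU flavors have been consolidated.
 */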
Steven Rostedtdf4fc312008-11-26 00:16:23 -05006617
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006618static int
6619ftrace_pid_release(struct inode *inode, struct file *file)
6620{
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006621 struct trace_array *tr = inode->i_private;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05006622
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006623 trace_array_put(tr);
6624
6625 return seq_release(inode, file);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05006626}
6627
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006628static const struct file_operations ftrace_pid_fops = {
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006629 .open = ftrace_pid_open,
6630 .write = ftrace_pid_write,
6631 .read = seq_read,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05006632 .llseek = tracing_lseek,
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04006633 .release = ftrace_pid_release,
Steven Rostedtdf4fc312008-11-26 00:16:23 -05006634};
6635
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006636void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
Steven Rostedtdf4fc312008-11-26 00:16:23 -05006637{
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006638 trace_create_file("set_ftrace_pid", 0644, d_tracer,
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04006639 tr, &ftrace_pid_fops);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05006640}
Steven Rostedtdf4fc312008-11-26 00:16:23 -05006641
Steven Rostedt (Red Hat)501c2372016-07-05 10:04:34 -04006642void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
6643 struct dentry *d_tracer)
6644{
6645 /* Only the top level directory has the dyn_tracefs and profile */
6646 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
6647
6648 ftrace_init_dyn_tracefs(d_tracer);
6649 ftrace_profile_tracefs(d_tracer);
6650}
6651
Steven Rostedt3d083392008-05-12 21:20:42 +02006652/**
Steven Rostedt81adbdc2008-10-23 09:33:02 -04006653 * ftrace_kill - kill ftrace
Steven Rostedta2bb6a32008-07-10 20:58:15 -04006654 *
 6655 * This function should be used by panic code. It stops ftrace
 6656 * but in a not so nice way. If you need to simply disable ftrace
 6657 * from a non-atomic context, unregister your ftrace_ops instead.
6658 */
Steven Rostedt81adbdc2008-10-23 09:33:02 -04006659void ftrace_kill(void)
Steven Rostedta2bb6a32008-07-10 20:58:15 -04006660{
6661 ftrace_disabled = 1;
6662 ftrace_enabled = 0;
Yisheng Xie5ccba642018-02-02 10:14:49 +08006663 ftrace_trace_function = ftrace_stub;
Steven Rostedta2bb6a32008-07-10 20:58:15 -04006664}
6665
6666/**
Steven Rostedte0a413f2011-09-29 21:26:16 -04006667 * ftrace_is_dead - Test if ftrace is dead or not.
6668 */
6669int ftrace_is_dead(void)
6670{
6671 return ftrace_disabled;
6672}
6673
6674/**
Steven Rostedt3d083392008-05-12 21:20:42 +02006675 * register_ftrace_function - register a function for profiling
 6676 * @ops: ops structure that holds the function for profiling.
6677 *
6678 * Register a function to be called by all functions in the
6679 * kernel.
6680 *
6681 * Note: @ops->func and all the functions it calls must be labeled
6682 * with "notrace", otherwise it will go into a
6683 * recursive loop.
6684 */
6685int register_ftrace_function(struct ftrace_ops *ops)
6686{
Steven Rostedt45a4a232011-04-21 23:16:46 -04006687 int ret = -1;
Steven Rostedt4eebcc82008-05-12 21:20:48 +02006688
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09006689 ftrace_ops_init(ops);
6690
Steven Rostedte6ea44e2009-02-14 01:42:44 -05006691 mutex_lock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01006692
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05006693 ret = ftrace_startup(ops, 0);
Steven Rostedtb8489142011-05-04 09:27:52 -04006694
Steven Rostedte6ea44e2009-02-14 01:42:44 -05006695 mutex_unlock(&ftrace_lock);
Borislav Petkov8d240dd2012-03-29 19:11:40 +02006696
Steven Rostedtb0fc4942008-05-12 21:20:43 +02006697 return ret;
Steven Rostedt3d083392008-05-12 21:20:42 +02006698}
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04006699EXPORT_SYMBOL_GPL(register_ftrace_function);
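/*
 * A minimal registration sketch (hypothetical module code, not part of
 * this file), showing the "notrace" requirement from the comment above:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip,
 *					  struct ftrace_ops *op,
 *					  struct pt_regs *regs)
 *	{
 *		// Called for every traced function. Anything called from
 *		// here must itself be notrace, or it will recurse.
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_trace_func,
 *	};
 *
 *	// module init:	register_ftrace_function(&my_ops);
 *	// module exit:	unregister_ftrace_function(&my_ops);
 */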
Steven Rostedt3d083392008-05-12 21:20:42 +02006700
6701/**
Uwe Kleine-Koenig32632922009-01-12 23:35:50 +01006702 * unregister_ftrace_function - unregister a function for profiling.
Steven Rostedt3d083392008-05-12 21:20:42 +02006703 * @ops: ops structure that holds the function to unregister
6704 *
6705 * Unregister a function that was added to be called by ftrace profiling.
6706 */
6707int unregister_ftrace_function(struct ftrace_ops *ops)
6708{
6709 int ret;
6710
Steven Rostedte6ea44e2009-02-14 01:42:44 -05006711 mutex_lock(&ftrace_lock);
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05006712 ret = ftrace_shutdown(ops, 0);
Steven Rostedte6ea44e2009-02-14 01:42:44 -05006713 mutex_unlock(&ftrace_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02006714
6715 return ret;
6716}
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04006717EXPORT_SYMBOL_GPL(unregister_ftrace_function);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02006718
Ingo Molnare309b412008-05-12 21:20:51 +02006719int
Steven Rostedtb0fc4942008-05-12 21:20:43 +02006720ftrace_enable_sysctl(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07006721 void __user *buffer, size_t *lenp,
Steven Rostedtb0fc4942008-05-12 21:20:43 +02006722 loff_t *ppos)
6723{
Steven Rostedt45a4a232011-04-21 23:16:46 -04006724 int ret = -ENODEV;
Steven Rostedt4eebcc82008-05-12 21:20:48 +02006725
Steven Rostedte6ea44e2009-02-14 01:42:44 -05006726 mutex_lock(&ftrace_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02006727
Steven Rostedt45a4a232011-04-21 23:16:46 -04006728 if (unlikely(ftrace_disabled))
6729 goto out;
6730
6731 ret = proc_dointvec(table, write, buffer, lenp, ppos);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02006732
Li Zefana32c7762009-06-26 16:55:51 +08006733 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
Steven Rostedtb0fc4942008-05-12 21:20:43 +02006734 goto out;
6735
Li Zefana32c7762009-06-26 16:55:51 +08006736 last_ftrace_enabled = !!ftrace_enabled;
Steven Rostedtb0fc4942008-05-12 21:20:43 +02006737
6738 if (ftrace_enabled) {
6739
Steven Rostedtb0fc4942008-05-12 21:20:43 +02006740 /* we are starting ftrace again */
Chunyan Zhangf86f4182017-06-07 16:12:51 +08006741 if (rcu_dereference_protected(ftrace_ops_list,
6742 lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
Jan Kiszka5000c412013-03-26 17:53:03 +01006743 update_ftrace_function();
Steven Rostedtb0fc4942008-05-12 21:20:43 +02006744
Steven Rostedt (Red Hat)524a3862015-03-06 19:55:13 -05006745 ftrace_startup_sysctl();
6746
Steven Rostedtb0fc4942008-05-12 21:20:43 +02006747 } else {
6748 /* stopping ftrace calls (just send to ftrace_stub) */
6749 ftrace_trace_function = ftrace_stub;
6750
6751 ftrace_shutdown_sysctl();
6752 }
6753
6754 out:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05006755 mutex_unlock(&ftrace_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02006756 return ret;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +02006757}
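/*
 * This handler backs the kernel.ftrace_enabled sysctl. A user-space
 * sketch of turning tracing off with it (equivalent to
 * "sysctl kernel.ftrace_enabled=0"; the procfs path is the usual one):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/sys/kernel/ftrace_enabled", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("0\n", f);	// all callbacks drop to ftrace_stub
 *		fclose(f);
 *		return 0;
 *	}
 */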