/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/magic.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

#ifdef CC_USING_FENTRY
# define fentry		1
#else
# define fentry		0
#endif

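/*
 * stack_dump_trace[] holds the return addresses of the deepest stack
 * seen so far (terminated by ULONG_MAX).  stack_dump_index[i] records
 * how many bytes of stack were in use at the point where
 * stack_dump_trace[i] was found.
 */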
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[1],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

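/*
 * Dump the recorded worst-case stack, entry by entry, to the console.
 * Only called from check_stack() when stack-end corruption is
 * detected, just before BUG().
 */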
static inline void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
		 max_stack_trace.nr_entries - 1);

	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == max_stack_trace.nr_entries ||
		    stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_dump_index[i];
		else
			size = stack_dump_index[i] - stack_dump_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}

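/*
 * Compare the current stack usage (measured from the passed in stack
 * pointer to the top of the thread stack) against the maximum seen so
 * far.  On a new maximum, save the stack trace and work out, for each
 * entry, how deep the stack was when that function was called.  The
 * slow path (recording a new maximum) runs with interrupts disabled
 * under max_stack_lock.
 */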
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;

	if (using_ftrace_ops_list_func())
		max_stack_trace.skip = 4;
	else
		max_stack_trace.skip = 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Add the passed in ip from the function tracer.
	 * Searching for this on the stack will skip over
	 * most of the overhead from the stack tracer itself.
	 */
	stack_dump_trace[0] = ip;
	max_stack_trace.nr_entries++;

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame) && i == 1) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	if (current != &init_task &&
	    *(end_of_stack(current)) != STACK_END_MAGIC) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

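/*
 * Callback hooked into the function tracer via trace_ops below; it is
 * invoked at the entry of every traced function.  The per-cpu
 * trace_active counter keeps it from recursing into itself before it
 * hands the (adjusted) return address off to check_stack().
 */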
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	/*
	 * When fentry is used, the traced function does not get
	 * its stack frame set up, and we lose the parent.
	 * The ip is pretty useless because the function tracer
	 * was called before that function set up its stack frame.
	 * In this case, we use the parent ip.
	 *
	 * By adding the return address of either the parent ip
	 * or the current ip we can disregard most of the stack usage
	 * caused by the stack tracer itself.
	 *
	 * The function tracer always reports the address of where the
	 * mcount call was, but the stack will hold the return address.
	 */
	if (fentry)
		ip = parent_ip;
	else
		ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

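/* The ftrace_ops that attaches stack_trace_call() to the function tracer. */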
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

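/*
 * read()/write() handlers for the "stack_max_size" debugfs file, which
 * exposes (and allows resetting of) the recorded maximum stack usage.
 */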
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

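/*
 * seq_file iterators for the "stack_trace" debugfs file.  t_start()
 * takes max_stack_lock (and bumps trace_active so the tracer does not
 * recurse on that lock) and t_stop() releases it, so the recorded
 * trace cannot change while it is being printed.
 */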
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

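/*
 * Print one line of the "stack_trace" file: the column header for the
 * SEQ_START_TOKEN slot, otherwise the depth, size and symbol of
 * entry i.
 */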
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

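/*
 * "stack_trace_filter" reuses the ftrace regex filtering of trace_ops,
 * limiting which functions the stack tracer checks.
 */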
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

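/*
 * Handler for the kernel.stack_tracer_enabled sysctl: registers or
 * unregisters the ftrace callback when the value actually changes.
 */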
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

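/*
 * "stacktrace" on the kernel command line enables the stack tracer at
 * boot; "stacktrace_filter=<funcs>" additionally seeds the function
 * filter applied in stack_trace_init().
 */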
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

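/*
 * Create the control files under the tracing directory (typically
 * /sys/kernel/debug/tracing) and register the tracer if it was
 * enabled on the command line.
 */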
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);