// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
static unsigned stack_trace_index[STACK_TRACE_ENTRIES];

static unsigned int stack_trace_nr_entries;
static unsigned long stack_trace_max_size;
static arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;

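/*
 * Dump the currently recorded maximum stack, one frame per line with its
 * remaining depth and frame size.  Only called from check_stack() when the
 * task's stack end canary has been overwritten, right before BUG().
 */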
static void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
		 stack_trace_nr_entries);

	for (i = 0; i < stack_trace_nr_entries; i++) {
		if (i + 1 == stack_trace_nr_entries)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}

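/*
 * Record a new maximum stack usage.  Takes a fresh stack trace, skips the
 * stack tracer's own frames, then walks the thread stack to work out at
 * which depth each traced return address lives, so that the per-entry
 * sizes reported through the stack_trace file can be computed.
 */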
static void check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = READ_ONCE(tracer_frame);
	int i, x;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_nr_entries = stack_trace_save(stack_dump_trace,
					ARRAY_SIZE(stack_dump_trace) - 1, 0);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not have the passed in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_nr_entries)
		i = 0;

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < stack_trace_nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_nr_entries; p++) {
			/*
			 * The READ_ONCE_NOCHECK is used to let KASAN know that
			 * this is not a stack-out-of-bounds error.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	stack_trace_nr_entries = x;

	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}

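/*
 * ftrace callback, invoked on (nearly) every traced function entry.  The
 * per-cpu disable_stack_tracer counter guards against recursion: only the
 * outermost invocation on a CPU actually measures the stack.
 */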
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;

	preempt_disable_notrace();

	/* no atomic needed, we only modify this variable by this cpu */
	__this_cpu_inc(disable_stack_tracer);
	if (__this_cpu_read(disable_stack_tracer) != 1)
		goto out;

	/* If rcu is not watching, then save stack trace can fail */
	if (!rcu_is_watching())
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	__this_cpu_dec(disable_stack_tracer);
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

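/*
 * Report the current maximum (in bytes) through the stack_max_size file in
 * the tracing directory.  filp->private_data points at stack_trace_max_size,
 * wired up in stack_trace_init() below.
 */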
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

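/*
 * Writing to stack_max_size sets the recorded maximum (typically 0 to reset
 * it).  The per-cpu disable_stack_tracer counter is bumped so the tracer
 * callback cannot recurse into stack_trace_max_lock while we hold it.
 */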
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu disable_stack_tracer here.
	 */
	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open = tracing_open_generic,
	.read = stack_max_size_read,
	.write = stack_max_size_write,
	.llseek = default_llseek,
};

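/*
 * seq_file iterator for the stack_trace file.  t_start() disables
 * interrupts, bumps the per-cpu disable counter and takes
 * stack_trace_max_lock so the recorded snapshot cannot change while it is
 * being printed; t_stop() undoes all of that in reverse order.
 */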
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= stack_trace_nr_entries)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	local_irq_disable();

	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

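/*
 * Print one entry of the saved maximum stack: its index, the depth at which
 * it was found, the size of its frame (difference to the next entry) and
 * the resolved symbol.
 */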
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_nr_entries)
		return 0;

	if (i + 1 == stack_trace_nr_entries)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open = stack_trace_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

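/*
 * With dynamic ftrace, the stack_trace_filter file lets the set of
 * functions checked by the stack tracer be restricted, reusing the generic
 * ftrace filter parsing and seq_file code.
 */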
#ifdef CONFIG_DYNAMIC_FTRACE

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

#endif /* CONFIG_DYNAMIC_FTRACE */

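/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled.  Registers or
 * unregisters the ftrace callback only when the value actually changes
 * across the write.
 */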
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int was_enabled;
	int ret;

	mutex_lock(&stack_sysctl_mutex);
	was_enabled = !!stack_tracer_enabled;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (was_enabled == !!stack_tracer_enabled))
		goto out;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);
 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

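/*
 * "stacktrace" kernel command line parameter.  The optional
 * "stacktrace_filter=<funcs>" form stashes the filter string for
 * stack_trace_init() to apply once ftrace is ready.
 */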
static __init int enable_stacktrace(char *str)
{
	int len;

	if ((len = str_has_prefix(str, "_filter=")))
		strncpy(stack_trace_filter_buf, str + len, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

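/*
 * Create the tracing control files, apply any boot-time filter, and
 * register the callback if "stacktrace" was given on the command line.
 */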
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("stack_trace_filter", 0644, d_tracer,
			  &trace_ops, &stack_trace_filter_fops);
#endif

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);