/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[0],
};

unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;
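
/*
 * The tracer is off by default and can be enabled either with the
 * "stacktrace" kernel command line parameter (see enable_stacktrace()
 * below) or at runtime via the sysctl, e.g.:
 *
 *	echo 1 > /proc/sys/kernel/stack_tracer_enabled
 */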

void stack_trace_print(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
		 stack_trace_max.nr_entries);

	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == stack_trace_max.nr_entries ||
		    stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}
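
/*
 * Sample stack_trace_print() output (values illustrative; the format
 * is "%3ld) %8d   %5d   %pS"):
 *
 *	  0)     4360      64   _raw_spin_lock+0x18/0x20
 */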

/*
 * When arch-specific code overrides this function, the following
 * data should be filled up, assuming stack_trace_max_lock is held to
 * prevent concurrent updates.
 *     stack_trace_index[]
 *     stack_trace_max
 *     stack_trace_max_size
 */
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i, x;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
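	/*
	 * this_size is now the number of bytes in use on the thread
	 * stack: masking with (THREAD_SIZE-1) yields the offset of the
	 * current stack pointer within the stack, and subtracting that
	 * from THREAD_SIZE gives the distance down from the top. E.g.,
	 * with THREAD_SIZE of 16K and an offset of 0x3f00, 0x4000 -
	 * 0x3f00 = 256 bytes are in use (illustrative values).
	 */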
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	/*
	 * There's a slight chance that we are tracing inside the
	 * RCU infrastructure, and rcu_irq_enter() will not work
	 * as expected.
	 */
	if (unlikely(rcu_irq_enter_disabled()))
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/*
	 * RCU may not be watching, make it see us.
	 * The stack trace code uses rcu_sched.
	 */
	rcu_irq_enter();

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_max.nr_entries = 0;
	stack_trace_max.skip = 3;

	save_stack_trace(&stack_trace_max);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not have the passed in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_max.nr_entries)
		i = 0;

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < stack_trace_max.nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_max.nr_entries; p++) {
			if (stack_dump_trace[i] == ULONG_MAX)
				break;
			/*
			 * The READ_ONCE_NOCHECK is used to let KASAN know that
			 * this is not a stack-out-of-bounds error.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	stack_trace_max.nr_entries = x;
	for (; x < i; x++)
		stack_dump_trace[x] = ULONG_MAX;

	if (task_stack_end_corrupted(current)) {
		stack_trace_print();
		BUG();
	}

 out:
	rcu_irq_exit();
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;

	preempt_disable_notrace();

	/* no atomic needed, we only modify this variable by this cpu */
	__this_cpu_inc(disable_stack_tracer);
	if (__this_cpu_read(disable_stack_tracer) != 1)
		goto out;

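	/*
	 * ftrace hands us the address of the mcount/fentry call site;
	 * advancing by MCOUNT_INSN_SIZE points past that call so the ip
	 * matches the return address save_stack_trace() will find on
	 * the stack (see the skip loop in check_stack()).
	 */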
	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	__this_cpu_dec(disable_stack_tracer);
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu disable_stack_tracer here.
	 */
	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);
	local_irq_restore(flags);

	return count;
}
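
/*
 * Writing to the stack_max_size file resets the watermark, e.g.
 * (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 0 > /sys/kernel/debug/tracing/stack_max_size
 */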

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}
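
/*
 * The snapshot is kept stable for the whole read of the stack_trace
 * file: t_start() disables interrupts, bumps the per-CPU
 * disable_stack_tracer count and takes stack_trace_max_lock; t_stop()
 * undoes all three, so check_stack() cannot rewrite the buffers
 * mid-iteration.
 */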
static void *t_start(struct seq_file *m, loff_t *pos)
{
	local_irq_disable();

	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "# Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_max.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == stack_trace_max.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};
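
/*
 * stack_trace_filter takes the same glob syntax as set_ftrace_filter
 * and limits which functions the stack tracer checks, e.g.
 * (illustrative):
 *
 *	echo 'vfs_*' > /sys/kernel/debug/tracing/stack_trace_filter
 */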

#endif /* CONFIG_DYNAMIC_FTRACE */

int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
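
/*
 * Because __setup() matches by prefix, this handler sees both forms
 * (filter list illustrative):
 *
 *	stacktrace
 *	stacktrace_filter=vfs_read,vfs_write
 */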

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			  &stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			  NULL, &stack_trace_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("stack_trace_filter", 0444, d_tracer,
			  &trace_ops, &stack_trace_filter_fops);
#endif

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);