// SPDX-License-Identifier: GPL-2.0

/*
 * Stack trace utility functions etc.
 *
 * Copyright 2008 Christoph Hellwig, IBM Corp.
 * Copyright 2018 SUSE Linux GmbH
 * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
 */

#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <asm/kprobes.h>

#include <asm/paca.h>

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
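/*
 * @trace:     buffer to record return addresses into
 * @sp:        stack pointer of the frame to start unwinding from
 * @tsk:       task that owns the stack, used for bounds checking
 * @savesched: when zero, entries inside scheduler functions are
 *             skipped, so a sleeping task's trace starts at the
 *             caller of schedule()
 */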
static void save_context_stack(struct stack_trace *trace, unsigned long sp,
			struct task_struct *tsk, int savesched)
{
	for (;;) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long newsp, ip;

		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

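		/*
		 * Per the powerpc ABI, a stack frame starts with the back
		 * chain (a pointer to the caller's frame) at offset 0, and
		 * the saved LR sits STACK_FRAME_LR_SAVE longs into the frame.
		 */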
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];

		if (savesched || !in_sched_functions(ip)) {
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = ip;
			else
				trace->skip--;
		}

		if (trace->nr_entries >= trace->max_entries)
			return;

		sp = newsp;
	}
}

void save_stack_trace(struct stack_trace *trace)
{
	unsigned long sp;

	sp = current_stack_pointer();

	save_context_stack(trace, sp, current, 1);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long sp;

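	/*
	 * Pin the task's stack so it cannot be freed underneath us,
	 * e.g. if the task exits while we are still walking it. If the
	 * task no longer has a stack, the trace is simply left empty.
	 */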
	if (!try_get_task_stack(tsk))
		return;

	if (tsk == current)
		sp = current_stack_pointer();
	else
		sp = tsk->thread.ksp;

	save_context_stack(trace, sp, tsk, 0);

	put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

void
save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	save_context_stack(trace, regs->gpr[1], current, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
/*
 * This function returns an error if it detects any unreliable features of the
 * stack. Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
static int __save_stack_trace_tsk_reliable(struct task_struct *tsk,
					   struct stack_trace *trace)
{
	unsigned long sp;
	unsigned long newsp;
	unsigned long stack_page = (unsigned long)task_stack_page(tsk);
	unsigned long stack_end;
	int graph_idx = 0;
	bool firstframe;

	stack_end = stack_page + THREAD_SIZE;
	if (!is_idle_task(tsk)) {
		/*
		 * For user tasks, this is the SP value loaded on
		 * kernel entry, see "PACAKSAVE(r13)" in _switch() and
		 * system_call_common()/EXCEPTION_PROLOG_COMMON().
		 *
		 * Likewise for non-swapper kernel threads,
		 * this also happens to be the top of the stack
		 * as set up by copy_thread().
		 *
		 * Note that stack backlinks are not properly set up by
		 * copy_thread(), so a freshly forked task will have an
		 * unreliable stack trace until it has been _switch()'ed
		 * to for the first time.
		 */
		stack_end -= STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	} else {
		/*
		 * Idle tasks have a custom stack layout,
		 * cf. cpu_idle_thread_init().
		 */
		stack_end -= STACK_FRAME_OVERHEAD;
	}

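	/*
	 * For the current task we can read the stack pointer directly;
	 * for any other (inactive) task, use the kernel SP that
	 * _switch() saved in its thread_struct.
	 */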
	if (tsk == current)
		sp = current_stack_pointer();
	else
		sp = tsk->thread.ksp;

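	/* The starting SP must lie within the task's stack region. */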
	if (sp < stack_page + sizeof(struct thread_struct) ||
	    sp > stack_end - STACK_FRAME_MIN_SIZE) {
		return -EINVAL;
	}

	for (firstframe = true; sp != stack_end;
	     firstframe = false, sp = newsp) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long ip;

		/* Sanity check: the ABI requires SP to be aligned to 16 bytes. */
		if (sp & 0xF)
			return -EINVAL;

		newsp = stack[0];
		/* Stack grows downwards; unwinder may only go up. */
		if (newsp <= sp)
			return -EINVAL;

		if (newsp != stack_end &&
		    newsp > stack_end - STACK_FRAME_MIN_SIZE) {
			return -EINVAL; /* invalid backlink, too far up. */
		}

		/*
		 * We can only trust the bottom frame's backlink; the
		 * rest of the frame may be uninitialized, so continue
		 * to the next frame.
		 */
		if (firstframe)
			continue;

		/* Mark stacktraces with exception frames as unreliable. */
		if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
		    stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			return -EINVAL;
		}

		/* Examine the saved LR: it must point into kernel code. */
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!__kernel_text_address(ip))
			return -EINVAL;

		/*
		 * FIXME: IMHO these tests do not belong in
		 * arch-dependent code, they are generic.
		 */
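		/*
		 * If the function graph tracer patched this return address,
		 * recover the original call-site address.
		 */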
		ip = ftrace_graph_ret_addr(tsk, &graph_idx, ip, NULL);
#ifdef CONFIG_KPROBES
		/*
		 * Mark stacktraces with kretprobed functions on them
		 * as unreliable.
		 */
		if (ip == (unsigned long)kretprobe_trampoline)
			return -EINVAL;
#endif

		if (trace->nr_entries >= trace->max_entries)
			return -E2BIG;
		if (!trace->skip)
			trace->entries[trace->nr_entries++] = ip;
		else
			trace->skip--;
	}
	return 0;
}

int save_stack_trace_tsk_reliable(struct task_struct *tsk,
				  struct stack_trace *trace)
{
	int ret;

	/*
	 * If the task doesn't have a stack (e.g., a zombie), the stack is
	 * "reliably" empty.
	 */
	if (!try_get_task_stack(tsk))
		return 0;

	ret = __save_stack_trace_tsk_reliable(tsk, trace);

	put_task_stack(tsk);

	return ret;
}
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
static void handle_backtrace_ipi(struct pt_regs *regs)
{
	nmi_cpu_backtrace(regs);
}

static void raise_backtrace_ipi(cpumask_t *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (cpu == smp_processor_id())
			handle_backtrace_ipi(NULL);
		else
			smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, 5 * USEC_PER_SEC);
	}

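	/*
	 * CPUs that handled the IPI have already cleared themselves from
	 * *mask in nmi_cpu_backtrace(). Any CPU still set did not respond
	 * within the 5 second timeout, so dump whatever state its paca
	 * still lets us reach.
	 */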
	for_each_cpu(cpu, mask) {
		struct paca_struct *p = paca_ptrs[cpu];

		cpumask_clear_cpu(cpu, mask);

		pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
		if (!virt_addr_valid(p)) {
			pr_warn("paca pointer appears corrupt? (%px)\n", p);
			continue;
		}

		pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d",
			p->irq_soft_mask, p->in_mce, p->in_nmi);

		if (virt_addr_valid(p->__current))
			pr_cont(" current: %d (%s)\n", p->__current->pid,
				p->__current->comm);
		else
			pr_cont(" current pointer corrupt? (%px)\n", p->__current);

		pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
		show_stack(p->__current, (unsigned long *)p->saved_r1);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}
#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */