// SPDX-License-Identifier: GPL-2.0

/*
 * Stack trace utility functions etc.
 *
 * Copyright 2008 Christoph Hellwig, IBM Corp.
 * Copyright 2018 SUSE Linux GmbH
 * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
 */

#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <asm/kprobes.h>

#include <asm/paca.h>

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
static void save_context_stack(struct stack_trace *trace, unsigned long sp,
			struct task_struct *tsk, int savesched)
{
	for (;;) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long newsp, ip;

		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

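		/*
		 * Each frame stores the backchain pointer to the previous
		 * frame at offset 0, and the LR save slot at index
		 * STACK_FRAME_LR_SAVE.
		 */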
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];

		if (savesched || !in_sched_functions(ip)) {
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = ip;
			else
				trace->skip--;
		}

		if (trace->nr_entries >= trace->max_entries)
			return;

		sp = newsp;
	}
}

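/*
 * Example usage (a sketch, not taken from this file): callers of the
 * old stack_trace API point ->entries at a caller-owned buffer and set
 * ->skip to drop the innermost frames:
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries	= entries,
 *		.max_entries	= ARRAY_SIZE(entries),
 *		.skip		= 2,
 *	};
 *	save_stack_trace(&trace);
 */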
void save_stack_trace(struct stack_trace *trace)
{
	unsigned long sp;

	sp = current_stack_pointer();

	save_context_stack(trace, sp, current, 1);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long sp;

	if (tsk == current)
		sp = current_stack_pointer();
	else
		sp = tsk->thread.ksp;

	save_context_stack(trace, sp, tsk, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

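/* On powerpc, GPR1 is the stack pointer, so unwind from regs->gpr[1]. */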
void
save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	save_context_stack(trace, regs->gpr[1], current, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
/*
 * This function returns an error if it detects any unreliable features of the
 * stack. Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int
save_stack_trace_tsk_reliable(struct task_struct *tsk,
				struct stack_trace *trace)
{
	unsigned long sp;
	unsigned long newsp;
	unsigned long stack_page = (unsigned long)task_stack_page(tsk);
	unsigned long stack_end;
	int graph_idx = 0;
	bool firstframe;

	stack_end = stack_page + THREAD_SIZE;
	if (!is_idle_task(tsk)) {
		/*
		 * For user tasks, this is the SP value loaded on
		 * kernel entry, see "PACAKSAVE(r13)" in _switch() and
		 * system_call_common()/EXCEPTION_PROLOG_COMMON().
		 *
		 * Likewise for non-swapper kernel threads,
		 * this also happens to be the top of the stack
		 * as set up by copy_thread().
		 *
		 * Note that stack backlinks are not properly set up by
		 * copy_thread(), and thus a forked task will have an
		 * unreliable stack trace until it's been _switch()'ed
		 * to for the first time.
		 */
		stack_end -= STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	} else {
		/*
		 * idle tasks have a custom stack layout,
		 * c.f. cpu_idle_thread_init().
		 */
		stack_end -= STACK_FRAME_OVERHEAD;
	}

	if (tsk == current)
		sp = current_stack_pointer();
	else
		sp = tsk->thread.ksp;

	if (sp < stack_page + sizeof(struct thread_struct) ||
	    sp > stack_end - STACK_FRAME_MIN_SIZE) {
		return 1;
	}

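	/*
	 * Walk the stack one frame at a time via the backchain,
	 * validating each frame before trusting its saved LR.
	 */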
	for (firstframe = true; sp != stack_end;
	     firstframe = false, sp = newsp) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long ip;

		/* Sanity check: the ABI requires SP to be aligned to 16 bytes. */
		if (sp & 0xF)
			return 1;

		newsp = stack[0];
		/* Stack grows downwards; unwinder may only go up. */
		if (newsp <= sp)
			return 1;

		if (newsp != stack_end &&
		    newsp > stack_end - STACK_FRAME_MIN_SIZE) {
			return 1; /* invalid backlink, too far up. */
		}

		/*
		 * We can only trust the bottom frame's backlink; the
		 * rest of the frame may be uninitialized. Continue to
		 * the next frame.
		 */
		if (firstframe)
			continue;

		/* Mark stacktraces with exception frames as unreliable. */
		if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
		    stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			return 1;
		}

		/* Examine the saved LR: it must point into kernel code. */
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!__kernel_text_address(ip))
			return 1;

		/*
		 * FIXME: IMHO these tests do not belong in
		 * arch-dependent code; they are generic.
		 */
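		/*
		 * If the function graph tracer has replaced this return
		 * address with its trampoline, map it back to the
		 * original caller.
		 */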
		ip = ftrace_graph_ret_addr(tsk, &graph_idx, ip, NULL);
#ifdef CONFIG_KPROBES
		/*
		 * Mark stacktraces with kretprobed functions on them
		 * as unreliable.
		 */
		if (ip == (unsigned long)kretprobe_trampoline)
			return 1;
#endif

		if (trace->nr_entries >= trace->max_entries)
			return -E2BIG;
		if (!trace->skip)
			trace->entries[trace->nr_entries++] = ip;
		else
			trace->skip--;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk_reliable);
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
static void handle_backtrace_ipi(struct pt_regs *regs)
{
	nmi_cpu_backtrace(regs);
}

static void raise_backtrace_ipi(cpumask_t *mask)
{
	unsigned int cpu;

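	/* First pass: ask every CPU in the mask to print its own trace. */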
	for_each_cpu(cpu, mask) {
		if (cpu == smp_processor_id())
			handle_backtrace_ipi(NULL);
		else
			smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, 5 * USEC_PER_SEC);
	}

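	/*
	 * Second pass: any CPU still left in the mask did not respond;
	 * dump what state we can from its paca instead.
	 */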
	for_each_cpu(cpu, mask) {
		struct paca_struct *p = paca_ptrs[cpu];

		cpumask_clear_cpu(cpu, mask);

		pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
		if (!virt_addr_valid(p)) {
			pr_warn("paca pointer appears corrupt? (%px)\n", p);
			continue;
		}

		pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d",
			p->irq_soft_mask, p->in_mce, p->in_nmi);

		if (virt_addr_valid(p->__current))
			pr_cont(" current: %d (%s)\n", p->__current->pid,
				p->__current->comm);
		else
			pr_cont(" current pointer corrupt? (%px)\n", p->__current);

		pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
		show_stack(p->__current, (unsigned long *)p->saved_r1);
	}
}

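/*
 * Entry point for the generic NMI backtrace machinery (e.g. sysrq-l);
 * it hands the per-CPU work off to raise_backtrace_ipi() above.
 */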
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}
#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */