blob: 6106760de71657fd8f8bd4565a21c867adfe8a3b [file] [log] [blame]
Thomas Gleixner457c8992019-05-19 13:08:55 +01001// SPDX-License-Identifier: GPL-2.0-only
Josh Poimboeuf7c7900f2016-09-16 14:18:12 -05002#include <linux/sched.h>
Ingo Molnar29930022017-02-08 18:51:36 +01003#include <linux/sched/task.h>
Ingo Molnar68db0cf2017-02-08 18:51:37 +01004#include <linux/sched/task_stack.h>
Josh Poimboeufa8b7a922017-04-12 13:47:12 -05005#include <linux/interrupt.h>
6#include <asm/sections.h>
Josh Poimboeuf7c7900f2016-09-16 14:18:12 -05007#include <asm/ptrace.h>
8#include <asm/bitops.h>
9#include <asm/stacktrace.h>
10#include <asm/unwind.h>
11
/* A stack frame header: the saved frame pointer plus the return address. */
#define FRAME_HEADER_SIZE (sizeof(long) * 2)
13
Josh Poimboeufee9f8fc2017-07-24 18:36:57 -050014unsigned long unwind_get_return_address(struct unwind_state *state)
15{
16 if (unwind_done(state))
17 return 0;
18
19 return __kernel_text_address(state->ip) ? state->ip : 0;
20}
21EXPORT_SYMBOL_GPL(unwind_get_return_address);
22
23unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
24{
25 if (unwind_done(state))
26 return NULL;
27
28 return state->regs ? &state->regs->ip : state->bp + 1;
29}
Josh Poimboeuf84936112017-01-09 12:00:23 -060030
Josh Poimboeufaa4f8532017-04-18 08:12:58 -050031static void unwind_dump(struct unwind_state *state)
Josh Poimboeuf8b5e99f2016-12-16 10:05:06 -060032{
33 static bool dumped_before = false;
34 bool prev_zero, zero = false;
Josh Poimboeufaa4f8532017-04-18 08:12:58 -050035 unsigned long word, *sp;
Josh Poimboeuf262fa732017-04-25 20:48:52 -050036 struct stack_info stack_info = {0};
37 unsigned long visit_mask = 0;
Josh Poimboeuf8b5e99f2016-12-16 10:05:06 -060038
39 if (dumped_before)
40 return;
41
42 dumped_before = true;
43
Josh Poimboeuf4ea3d742017-04-18 08:12:57 -050044 printk_deferred("unwind stack type:%d next_sp:%p mask:0x%lx graph_idx:%d\n",
Josh Poimboeuf8b5e99f2016-12-16 10:05:06 -060045 state->stack_info.type, state->stack_info.next_sp,
46 state->stack_mask, state->graph_idx);
47
Josh Poimboeuf99bd28a2017-10-09 20:20:04 -050048 for (sp = PTR_ALIGN(state->orig_sp, sizeof(long)); sp;
49 sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
Josh Poimboeuf262fa732017-04-25 20:48:52 -050050 if (get_stack_info(sp, state->task, &stack_info, &visit_mask))
51 break;
Josh Poimboeuf8b5e99f2016-12-16 10:05:06 -060052
Josh Poimboeuf262fa732017-04-25 20:48:52 -050053 for (; sp < stack_info.end; sp++) {
Josh Poimboeuf8b5e99f2016-12-16 10:05:06 -060054
Josh Poimboeuf262fa732017-04-25 20:48:52 -050055 word = READ_ONCE_NOCHECK(*sp);
56
57 prev_zero = zero;
58 zero = word == 0;
59
60 if (zero) {
61 if (!prev_zero)
62 printk_deferred("%p: %0*x ...\n",
63 sp, BITS_PER_LONG/4, 0);
64 continue;
65 }
66
67 printk_deferred("%p: %0*lx (%pB)\n",
68 sp, BITS_PER_LONG/4, word, (void *)word);
Josh Poimboeuf8b5e99f2016-12-16 10:05:06 -060069 }
Josh Poimboeuf8b5e99f2016-12-16 10:05:06 -060070 }
71}
72
Josh Poimboeuf24d86f52016-10-27 08:10:58 -050073static size_t regs_size(struct pt_regs *regs)
74{
75 /* x86_32 regs from kernel mode are two words shorter: */
76 if (IS_ENABLED(CONFIG_X86_32) && !user_mode(regs))
77 return sizeof(*regs) - 2*sizeof(long);
78
79 return sizeof(*regs);
80}
81
Josh Poimboeufa8b7a922017-04-12 13:47:12 -050082static bool in_entry_code(unsigned long ip)
83{
84 char *addr = (char *)ip;
85
86 if (addr >= __entry_text_start && addr < __entry_text_end)
87 return true;
88
Josh Poimboeufa8b7a922017-04-12 13:47:12 -050089 if (addr >= __irqentry_text_start && addr < __irqentry_text_end)
90 return true;
Josh Poimboeufa8b7a922017-04-12 13:47:12 -050091
92 return false;
93}
94
Josh Poimboeufb0d50c72017-04-25 20:48:51 -050095static inline unsigned long *last_frame(struct unwind_state *state)
96{
97 return (unsigned long *)task_pt_regs(state->task) - 2;
98}
99
Josh Poimboeuf519fb5c2017-05-23 10:37:30 -0500100static bool is_last_frame(struct unwind_state *state)
101{
102 return state->bp == last_frame(state);
103}
104
Josh Poimboeuf87a6b292017-03-13 23:27:47 -0500105#ifdef CONFIG_X86_32
106#define GCC_REALIGN_WORDS 3
107#else
108#define GCC_REALIGN_WORDS 1
109#endif
110
Josh Poimboeufb0d50c72017-04-25 20:48:51 -0500111static inline unsigned long *last_aligned_frame(struct unwind_state *state)
112{
113 return last_frame(state) - GCC_REALIGN_WORDS;
114}
115
Josh Poimboeuf519fb5c2017-05-23 10:37:30 -0500116static bool is_last_aligned_frame(struct unwind_state *state)
Josh Poimboeufacb46082016-10-20 11:34:41 -0500117{
Josh Poimboeufb0d50c72017-04-25 20:48:51 -0500118 unsigned long *last_bp = last_frame(state);
119 unsigned long *aligned_bp = last_aligned_frame(state);
Josh Poimboeufacb46082016-10-20 11:34:41 -0500120
Josh Poimboeuf8023e0e2016-12-16 10:05:05 -0600121 /*
Josh Poimboeuf519fb5c2017-05-23 10:37:30 -0500122 * GCC can occasionally decide to realign the stack pointer and change
123 * the offset of the stack frame in the prologue of a function called
124 * by head/entry code. Examples:
Josh Poimboeuf87a6b292017-03-13 23:27:47 -0500125 *
126 * <start_secondary>:
127 * push %edi
128 * lea 0x8(%esp),%edi
129 * and $0xfffffff8,%esp
130 * pushl -0x4(%edi)
131 * push %ebp
132 * mov %esp,%ebp
133 *
134 * <x86_64_start_kernel>:
135 * lea 0x8(%rsp),%r10
136 * and $0xfffffffffffffff0,%rsp
137 * pushq -0x8(%r10)
138 * push %rbp
139 * mov %rsp,%rbp
140 *
Josh Poimboeuf519fb5c2017-05-23 10:37:30 -0500141 * After aligning the stack, it pushes a duplicate copy of the return
142 * address before pushing the frame pointer.
Josh Poimboeuf8023e0e2016-12-16 10:05:05 -0600143 */
Josh Poimboeuf519fb5c2017-05-23 10:37:30 -0500144 return (state->bp == aligned_bp && *(aligned_bp + 1) == *(last_bp + 1));
145}
146
static bool is_last_ftrace_frame(struct unwind_state *state)
{
	unsigned long *last_bp = last_frame(state);
	unsigned long *last_ftrace_bp = last_bp - 3;

	/*
	 * When unwinding from an ftrace handler of a function called by entry
	 * code, the stack layout of the last frame is:
	 *
	 *   bp
	 *   parent ret addr
	 *   bp
	 *   function ret addr
	 *   parent ret addr
	 *   pt_regs
	 *   -----------------
	 */
	/* Match the duplicated bp and parent-return-address words above: */
	return (state->bp == last_ftrace_bp &&
		*state->bp == *(state->bp + 2) &&
		*(state->bp + 1) == *(state->bp + 4));
}
168
169static bool is_last_task_frame(struct unwind_state *state)
170{
171 return is_last_frame(state) || is_last_aligned_frame(state) ||
172 is_last_ftrace_frame(state);
Josh Poimboeufacb46082016-10-20 11:34:41 -0500173}
174
Josh Poimboeuf946c1912016-10-20 11:34:40 -0500175/*
176 * This determines if the frame pointer actually contains an encoded pointer to
177 * pt_regs on the stack. See ENCODE_FRAME_POINTER.
178 */
Josh Poimboeuf5c99b692017-10-09 20:20:03 -0500179#ifdef CONFIG_X86_64
Josh Poimboeuf946c1912016-10-20 11:34:40 -0500180static struct pt_regs *decode_frame_pointer(unsigned long *bp)
181{
182 unsigned long regs = (unsigned long)bp;
183
184 if (!(regs & 0x1))
185 return NULL;
186
187 return (struct pt_regs *)(regs & ~0x1);
188}
Josh Poimboeuf5c99b692017-10-09 20:20:03 -0500189#else
190static struct pt_regs *decode_frame_pointer(unsigned long *bp)
191{
192 unsigned long regs = (unsigned long)bp;
193
194 if (regs & 0x80000000)
195 return NULL;
196
197 return (struct pt_regs *)(regs | 0x80000000);
198}
199#endif
Josh Poimboeuf946c1912016-10-20 11:34:40 -0500200
Josh Poimboeuf62dd86a2017-10-09 20:20:02 -0500201#ifdef CONFIG_X86_32
202#define KERNEL_REGS_SIZE (sizeof(struct pt_regs) - 2*sizeof(long))
203#else
204#define KERNEL_REGS_SIZE (sizeof(struct pt_regs))
205#endif
206
Josh Poimboeuf5ed8d8b2017-04-12 13:47:10 -0500207static bool update_stack_state(struct unwind_state *state,
208 unsigned long *next_bp)
Josh Poimboeuf7c7900f2016-09-16 14:18:12 -0500209{
210 struct stack_info *info = &state->stack_info;
Josh Poimboeuf5ed8d8b2017-04-12 13:47:10 -0500211 enum stack_type prev_type = info->type;
212 struct pt_regs *regs;
Josh Poimboeuf6bcdf9d2017-04-12 13:47:11 -0500213 unsigned long *frame, *prev_frame_end, *addr_p, addr;
Josh Poimboeuf5ed8d8b2017-04-12 13:47:10 -0500214 size_t len;
215
216 if (state->regs)
217 prev_frame_end = (void *)state->regs + regs_size(state->regs);
218 else
219 prev_frame_end = (void *)state->bp + FRAME_HEADER_SIZE;
220
221 /* Is the next frame pointer an encoded pointer to pt_regs? */
222 regs = decode_frame_pointer(next_bp);
223 if (regs) {
224 frame = (unsigned long *)regs;
Josh Poimboeuf62dd86a2017-10-09 20:20:02 -0500225 len = KERNEL_REGS_SIZE;
Josh Poimboeufa8b7a922017-04-12 13:47:12 -0500226 state->got_irq = true;
Josh Poimboeuf5ed8d8b2017-04-12 13:47:10 -0500227 } else {
228 frame = next_bp;
229 len = FRAME_HEADER_SIZE;
230 }
Josh Poimboeuf7c7900f2016-09-16 14:18:12 -0500231
232 /*
Josh Poimboeuf5ed8d8b2017-04-12 13:47:10 -0500233 * If the next bp isn't on the current stack, switch to the next one.
Josh Poimboeuf7c7900f2016-09-16 14:18:12 -0500234 *
235 * We may have to traverse multiple stacks to deal with the possibility
Josh Poimboeuf5ed8d8b2017-04-12 13:47:10 -0500236 * that info->next_sp could point to an empty stack and the next bp
237 * could be on a subsequent stack.
Josh Poimboeuf7c7900f2016-09-16 14:18:12 -0500238 */
Josh Poimboeuf5ed8d8b2017-04-12 13:47:10 -0500239 while (!on_stack(info, frame, len))
Josh Poimboeuf7c7900f2016-09-16 14:18:12 -0500240 if (get_stack_info(info->next_sp, state->task, info,
241 &state->stack_mask))
242 return false;
243
Josh Poimboeuf5ed8d8b2017-04-12 13:47:10 -0500244 /* Make sure it only unwinds up and doesn't overlap the prev frame: */
245 if (state->orig_sp && state->stack_info.type == prev_type &&
246 frame < prev_frame_end)
247 return false;
248
Josh Poimboeuf62dd86a2017-10-09 20:20:02 -0500249 /*
250 * On 32-bit with user mode regs, make sure the last two regs are safe
251 * to access:
252 */
253 if (IS_ENABLED(CONFIG_X86_32) && regs && user_mode(regs) &&
254 !on_stack(info, frame, len + 2*sizeof(long)))
255 return false;
256
Josh Poimboeuf5ed8d8b2017-04-12 13:47:10 -0500257 /* Move state to the next frame: */
258 if (regs) {
259 state->regs = regs;
260 state->bp = NULL;
261 } else {
262 state->bp = next_bp;
263 state->regs = NULL;
264 }
265
Josh Poimboeuf6bcdf9d2017-04-12 13:47:11 -0500266 /* Save the return address: */
267 if (state->regs && user_mode(state->regs))
268 state->ip = 0;
269 else {
270 addr_p = unwind_get_return_address_ptr(state);
271 addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
272 state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
273 addr, addr_p);
274 }
275
Josh Poimboeuf5ed8d8b2017-04-12 13:47:10 -0500276 /* Save the original stack pointer for unwind_dump(): */
Josh Poimboeuf262fa732017-04-25 20:48:52 -0500277 if (!state->orig_sp)
Josh Poimboeuf5ed8d8b2017-04-12 13:47:10 -0500278 state->orig_sp = frame;
Josh Poimboeuf8b5e99f2016-12-16 10:05:06 -0600279
Josh Poimboeuf7c7900f2016-09-16 14:18:12 -0500280 return true;
281}
282
/*
 * Step the unwind to the caller's frame.  Returns true while more frames
 * remain; on failure or at the end of the stack, marks the state done
 * (STACK_TYPE_UNKNOWN) and returns false.  Warns (once) about bad frame
 * pointers unless a known-benign cause applies.
 */
bool unwind_next_frame(struct unwind_state *state)
{
	struct pt_regs *regs;
	unsigned long *next_bp;

	if (unwind_done(state))
		return false;

	/* Have we reached the end? */
	if (state->regs && user_mode(state->regs))
		goto the_end;

	if (is_last_task_frame(state)) {
		regs = task_pt_regs(state->task);

		/*
		 * kthreads (other than the boot CPU's idle thread) have some
		 * partial regs at the end of their stack which were placed
		 * there by copy_thread_tls().  But the regs don't have any
		 * useful information, so we can skip them.
		 *
		 * This user_mode() check is slightly broader than a PF_KTHREAD
		 * check because it also catches the awkward situation where a
		 * newly forked kthread transitions into a user task by calling
		 * do_execve(), which eventually clears PF_KTHREAD.
		 */
		if (!user_mode(regs))
			goto the_end;

		/*
		 * We're almost at the end, but not quite: there's still the
		 * syscall regs frame.  Entry code doesn't encode the regs
		 * pointer for syscalls, so we have to set it manually.
		 */
		state->regs = regs;
		state->bp = NULL;
		state->ip = 0;
		return true;
	}

	/* Get the next frame pointer: */
	if (state->next_bp) {
		/* next_bp was stashed by __unwind_start() for the IP==0 case: */
		next_bp = state->next_bp;
		state->next_bp = NULL;
	} else if (state->regs) {
		next_bp = (unsigned long *)state->regs->bp;
	} else {
		next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task, *state->bp);
	}

	/* Move to the next frame if it's safe: */
	if (!update_stack_state(state, next_bp))
		goto bad_address;

	return true;

bad_address:
	state->error = true;

	/*
	 * When unwinding a non-current task, the task might actually be
	 * running on another CPU, in which case it could be modifying its
	 * stack while we're reading it.  This is generally not a problem and
	 * can be ignored as long as the caller understands that unwinding
	 * another task will not always succeed.
	 */
	if (state->task != current)
		goto the_end;

	/*
	 * Don't warn if the unwinder got lost due to an interrupt in entry
	 * code or in the C handler before the first frame pointer got set up:
	 */
	if (state->got_irq && in_entry_code(state->ip))
		goto the_end;
	if (state->regs &&
	    state->regs->sp >= (unsigned long)last_aligned_frame(state) &&
	    state->regs->sp < (unsigned long)task_pt_regs(state->task))
		goto the_end;

	/*
	 * There are some known frame pointer issues on 32-bit.  Disable
	 * unwinder warnings on 32-bit until it gets objtool support.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		goto the_end;

	if (state->regs) {
		printk_deferred_once(KERN_WARNING
			"WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
			state->regs, state->task->comm,
			state->task->pid, next_bp);
		unwind_dump(state);
	} else {
		printk_deferred_once(KERN_WARNING
			"WARNING: kernel stack frame pointer at %p in %s:%d has bad value %p\n",
			state->bp, state->task->comm,
			state->task->pid, next_bp);
		unwind_dump(state);
	}
the_end:
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);
387EXPORT_SYMBOL_GPL(unwind_next_frame);
388
/*
 * Initialize an unwind, starting either from the given regs or from the
 * task's current frame pointer, then skip ahead to first_frame.
 */
void __unwind_start(struct unwind_state *state, struct task_struct *task,
		    struct pt_regs *regs, unsigned long *first_frame)
{
	unsigned long *bp;

	memset(state, 0, sizeof(*state));
	state->task = task;
	/* Starting from regs implies we were interrupted: */
	state->got_irq = (regs);

	/* Don't even attempt to start from user mode regs: */
	if (regs && user_mode(regs)) {
		state->stack_info.type = STACK_TYPE_UNKNOWN;
		return;
	}

	bp = get_frame_pointer(task, regs);

	/*
	 * If we crash with IP==0, the last successfully executed instruction
	 * was probably an indirect function call with a NULL function pointer.
	 * That means that SP points into the middle of an incomplete frame:
	 * *SP is a return pointer, and *(SP-sizeof(unsigned long)) is where we
	 * would have written a frame pointer if we hadn't crashed.
	 * Pretend that the frame is complete and that BP points to it, but save
	 * the real BP so that we can use it when looking for the next frame.
	 */
	if (regs && regs->ip == 0 &&
	    (unsigned long *)kernel_stack_pointer(regs) >= first_frame) {
		state->next_bp = bp;
		bp = ((unsigned long *)kernel_stack_pointer(regs)) - 1;
	}

	/* Initialize stack info and make sure the frame data is accessible: */
	get_stack_info(bp, state->task, &state->stack_info,
		       &state->stack_mask);
	update_stack_state(state, bp);

	/*
	 * The caller can provide the address of the first frame directly
	 * (first_frame) or indirectly (regs->sp) to indicate which stack frame
	 * to start unwinding at.  Skip ahead until we reach it.
	 */
	while (!unwind_done(state) &&
	       (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
		(state->next_bp == NULL && state->bp < first_frame)))
		unwind_next_frame(state);
}
EXPORT_SYMBOL_GPL(__unwind_start);
436EXPORT_SYMBOL_GPL(__unwind_start);