/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

#define OPCODE_BUFSIZE 64

int panic_on_unrecovered_nmi;
int panic_on_io_nmi;
static int die_counter;

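/*
 * Check whether @stack points into @task's thread stack and, if so, fill
 * in @info with the stack type and its bounds.
 */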
bool in_task_stack(unsigned long *stack, struct task_struct *task,
		   struct stack_info *info)
{
	unsigned long *begin = task_stack_page(task);
	unsigned long *end   = task_stack_page(task) + THREAD_SIZE;

	if (stack < begin || stack >= end)
		return false;

	info->type	= STACK_TYPE_TASK;
	info->begin	= begin;
	info->end	= end;
	info->next_sp	= NULL;

	return true;
}

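/*
 * Check whether @stack points into this CPU's entry stack and, if so, fill
 * in @info with its bounds.
 */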
bool in_entry_stack(unsigned long *stack, struct stack_info *info)
{
	struct entry_stack *ss = cpu_entry_stack(smp_processor_id());

	void *begin = ss;
	void *end = ss + 1;

	if ((void *)stack < begin || (void *)stack >= end)
		return false;

	info->type	= STACK_TYPE_ENTRY;
	info->begin	= begin;
	info->end	= end;
	info->next_sp	= NULL;

	return true;
}

static void printk_stack_address(unsigned long address, int reliable,
				 char *log_lvl)
{
	touch_nmi_watchdog();
	printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
}

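/*
 * Dump the OPCODE_BUFSIZE code bytes around @rip, putting the byte at @rip
 * itself in angle brackets.  Roughly two thirds of the buffer precede the
 * faulting instruction so the code leading up to it is visible as well.
 */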
void show_opcodes(u8 *rip, const char *loglvl)
{
	unsigned int code_prologue = OPCODE_BUFSIZE * 2 / 3;
	u8 opcodes[OPCODE_BUFSIZE];
	u8 *ip;
	int i;

	printk("%sCode: ", loglvl);

	ip = (u8 *)rip - code_prologue;
	if (probe_kernel_read(opcodes, ip, OPCODE_BUFSIZE)) {
		pr_cont("Bad RIP value.\n");
		return;
	}

	for (i = 0; i < OPCODE_BUFSIZE; i++, ip++) {
		if (ip == rip)
			pr_cont("<%02x> ", opcodes[i]);
		else
			pr_cont("%02x ", opcodes[i]);
	}
	pr_cont("\n");
}

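/*
 * Print the instruction pointer in the architecture's usual oops format
 * (EIP on 32-bit, CS:RIP on 64-bit), followed by the surrounding opcodes.
 */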
void show_ip(struct pt_regs *regs, const char *loglvl)
{
#ifdef CONFIG_X86_32
	printk("%sEIP: %pS\n", loglvl, (void *)regs->ip);
#else
	printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip);
#endif
	show_opcodes((u8 *)regs->ip, loglvl);
}

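/* Print the minimal iret frame: CS:IP (with opcode bytes), SS:SP and flags. */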
void show_iret_regs(struct pt_regs *regs)
{
	show_ip(regs, KERN_DEFAULT);
	printk(KERN_DEFAULT "RSP: %04x:%016lx EFLAGS: %08lx", (int)regs->ss,
		regs->sp, regs->flags);
}

static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
				  bool partial)
{
	/*
	 * These on_stack() checks aren't strictly necessary: the unwind code
	 * has already validated the 'regs' pointer.  The checks are done for
	 * ordering reasons: if the registers are on the next stack, we don't
	 * want to print them out yet.  Otherwise they'll be shown as part of
	 * the wrong stack.  Later, when show_trace_log_lvl() switches to the
	 * next stack, this function will be called again with the same regs so
	 * they can be printed in the right context.
	 */
	if (!partial && on_stack(info, regs, sizeof(*regs))) {
		__show_regs(regs, 0);

	} else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
				       IRET_FRAME_SIZE)) {
		/*
		 * When an interrupt or exception occurs in entry code, the
		 * full pt_regs might not have been saved yet.  In that case
		 * just print the iret frame.
		 */
		show_iret_regs(regs);
	}
}

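/*
 * Print the call trace for @task (or for the context in @regs), walking
 * each stack the unwinder passes through and flagging addresses it cannot
 * verify with a '?'.
 */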
void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
			unsigned long *stack, char *log_lvl)
{
	struct unwind_state state;
	struct stack_info stack_info = {0};
	unsigned long visit_mask = 0;
	int graph_idx = 0;
	bool partial = false;

	printk("%sCall Trace:\n", log_lvl);

	unwind_start(&state, task, regs, stack);
	stack = stack ? : get_stack_pointer(task, regs);
	regs = unwind_get_entry_regs(&state, &partial);

	/*
	 * Iterate through the stacks, starting with the current stack pointer.
	 * Each stack has a pointer to the next one.
	 *
	 * x86-64 can have several stacks:
	 * - task stack
	 * - interrupt stack
	 * - HW exception stacks (double fault, nmi, debug, mce)
	 * - entry stack
	 *
	 * x86-32 can have up to four stacks:
	 * - task stack
	 * - softirq stack
	 * - hardirq stack
	 * - entry stack
	 */
	for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
		const char *stack_name;

		if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
			/*
			 * We weren't on a valid stack.  It's possible that
			 * we overflowed a valid stack into a guard page.
			 * See if the next page up is valid so that we can
			 * generate some kind of backtrace if this happens.
			 */
			stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack);
			if (get_stack_info(stack, task, &stack_info, &visit_mask))
				break;
		}

		stack_name = stack_type_name(stack_info.type);
		if (stack_name)
			printk("%s <%s>\n", log_lvl, stack_name);

		if (regs)
			show_regs_if_on_stack(&stack_info, regs, partial);

		/*
		 * Scan the stack, printing any text addresses we find.  At the
		 * same time, follow proper stack frames with the unwinder.
		 *
		 * Addresses found during the scan which are not reported by
		 * the unwinder are considered to be additional clues which are
		 * sometimes useful for debugging and are prefixed with '?'.
		 * This also serves as a failsafe option in case the unwinder
		 * goes off in the weeds.
		 */
		for (; stack < stack_info.end; stack++) {
			unsigned long real_addr;
			int reliable = 0;
			unsigned long addr = READ_ONCE_NOCHECK(*stack);
			unsigned long *ret_addr_p =
				unwind_get_return_address_ptr(&state);

			if (!__kernel_text_address(addr))
				continue;

			/*
			 * Don't print regs->ip again if it was already printed
			 * by show_regs_if_on_stack().
			 */
			if (regs && stack == &regs->ip)
				goto next;

			if (stack == ret_addr_p)
				reliable = 1;

			/*
			 * When function graph tracing is enabled for a
			 * function, its return address on the stack is
			 * replaced with the address of an ftrace handler
			 * (return_to_handler).  In that case, before printing
			 * the "real" address, we want to print the handler
			 * address as an "unreliable" hint that function graph
			 * tracing was involved.
			 */
			real_addr = ftrace_graph_ret_addr(task, &graph_idx,
							  addr, stack);
			if (real_addr != addr)
				printk_stack_address(addr, 0, log_lvl);
			printk_stack_address(real_addr, reliable, log_lvl);

			if (!reliable)
				continue;

next:
			/*
			 * Get the next frame from the unwinder.  No need to
			 * check for an error: if anything goes wrong, the rest
			 * of the addresses will just be printed as unreliable.
			 */
			unwind_next_frame(&state);

			/* if the frame has entry regs, print them */
			regs = unwind_get_entry_regs(&state, &partial);
			if (regs)
				show_regs_if_on_stack(&stack_info, regs, partial);
		}

		if (stack_name)
			printk("%s </%s>\n", log_lvl, stack_name);
	}
}

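/* Dump the stack of @task (or of the current task if @task is NULL). */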
void show_stack(struct task_struct *task, unsigned long *sp)
{
	task = task ? : current;

	/*
	 * Stack frames below this one aren't interesting.  Don't show them
	 * if we're printing for %current.
	 */
	if (!sp && task == current)
		sp = get_stack_pointer(current, NULL);

	show_trace_log_lvl(task, NULL, sp, KERN_DEFAULT);
}

void show_stack_regs(struct pt_regs *regs)
{
	show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

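/*
 * Prepare for printing an oops: disable interrupts, take the die lock
 * (tolerating nested oopses on the same CPU), and make the console
 * verbose.  Returns the saved IRQ flags for oops_end().
 */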
unsigned long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);

void __noreturn rewind_stack_do_exit(int signr);

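/*
 * Finish an oops: hand over to a crash kernel if one is loaded, taint the
 * kernel, drop the die lock, and then panic or kill the offending task
 * depending on @signr and the context.
 */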
void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	/*
	 * We're not going to return, but we might be on an IST stack or
	 * have very little stack space left.  Rewind the stack and kill
	 * the task.
	 */
	rewind_stack_do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);

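/*
 * Print the oops banner and, unless a registered die notifier swallows the
 * event, the module list, the register state and a one-line IP/SP summary.
 * Returns 1 if a notifier handled the event, 0 otherwise.
 */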
int __die(const char *str, struct pt_regs *regs, long err)
{
#ifdef CONFIG_X86_32
	unsigned short ss;
	unsigned long sp;
#endif
	printk(KERN_DEFAULT
	       "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
	       IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
	       IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
	       debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
	       IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "",
	       IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ?
	       (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "");

	if (notify_die(DIE_OOPS, str, regs, err,
		       current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);
#ifdef CONFIG_X86_32
	if (user_mode(regs)) {
		sp = regs->sp;
		ss = regs->ss;
	} else {
		sp = kernel_stack_pointer(regs);
		savesegment(ss, ss);
	}
	printk(KERN_EMERG "EIP: %pS SS:ESP: %04x:%08lx\n",
	       (void *)regs->ip, ss, sp);
#else
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP: %pS RSP: %016lx\n", (void *)regs->ip, regs->sp);
#endif
	return 0;
}
NOKPROBE_SYMBOL(__die);

/*
 * This is gone through when something in the kernel has done something bad
 * and is about to be terminated:
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	if (__die(str, regs, err))
		sig = 0;
	oops_end(flags, regs, sig);
}

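/*
 * Top-level register dump: print the register state and, when the fault
 * happened in kernel mode, the stack trace as well.
 */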
void show_regs(struct pt_regs *regs)
{
	bool all = true;

	show_regs_print_info(KERN_DEFAULT);

	if (IS_ENABLED(CONFIG_X86_32))
		all = !user_mode(regs);

	__show_regs(regs, all);

	/*
	 * When in-kernel, we also print out the stack at the time of the fault.
	 */
	if (!user_mode(regs))
		show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}
Borislav Petkov16d1cb02018-03-06 10:49:14 +0100409}