/*
 * Stack trace management functions
 *
 *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

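/*
 * Record a single return address in the trace buffer: scheduler internals
 * are dropped when 'nosched' is set, the caller's skip count is consumed
 * first, and -1 is returned once the buffer is full so the unwind loop can
 * stop early.
 */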
static int save_stack_address(struct stack_trace *trace, unsigned long addr,
			      bool nosched)
{
	if (nosched && in_sched_functions(addr))
		return 0;

	if (trace->skip > 0) {
		trace->skip--;
		return 0;
	}

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	trace->entries[trace->nr_entries++] = addr;
	return 0;
}

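/*
 * Core kernel-stack walker.  If register state is available, its
 * instruction pointer is recorded first (the unwinder itself only yields
 * return addresses), then frames are followed until the unwinder is done
 * or the buffer fills up.  Kept noinline so the wrappers below can
 * reliably skip their own frame via trace->skip.
 */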
static void noinline __save_stack_trace(struct stack_trace *trace,
			struct task_struct *task, struct pt_regs *regs,
			bool nosched)
{
	struct unwind_state state;
	unsigned long addr;

	if (regs)
		save_stack_address(trace, regs->ip, nosched);

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || save_stack_address(trace, addr, nosched))
			break;
	}
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
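	/* Skip the save_stack_trace() frame itself: */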
	trace->skip++;
	__save_stack_trace(trace, current, NULL, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

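/*
 * Typical usage, as a sketch (the caller owns the entry storage):
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries	= entries,
 *		.max_entries	= ARRAY_SIZE(entries),
 *		.skip		= 0,
 *	};
 *
 *	save_stack_trace(&trace);
 *
 * trace.entries[0 .. trace.nr_entries-1] then holds the return addresses.
 */
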
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	__save_stack_trace(trace, current, regs, false);
}

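/*
 * Save a trace for an arbitrary task.  try_get_task_stack() pins the
 * task's stack so it cannot be freed out from under the unwinder; the
 * 'nosched' argument filters out the scheduler frames that sit on top of
 * every sleeping task's stack.  When tracing current, one extra frame
 * (this wrapper itself) is skipped.
 */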
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	if (!try_get_task_stack(tsk))
		return;

	if (tsk == current)
		trace->skip++;
	__save_stack_trace(trace, tsk, NULL, true);

	put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE

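/*
 * Strict variant of __save_stack_trace(): instead of recording a
 * best-effort trace, it bails out with an error whenever the unwinder
 * hits anything it cannot prove reliable: kernel-mode pt_regs on the
 * stack under CONFIG_FRAME_POINTER, a return address outside known
 * kernel text, an unwinder error, or a walk that neither reaches user
 * mode nor ends in a kthread/idle task.  Consumers such as livepatch
 * rely on this all-or-nothing guarantee.
 */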
static int __always_inline
__save_stack_trace_reliable(struct stack_trace *trace,
			    struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs *regs;
	unsigned long addr;

	for (unwind_start(&state, task, NULL, NULL);
	     !unwind_done(&state) && !unwind_error(&state);
	     unwind_next_frame(&state)) {

		regs = unwind_get_entry_regs(&state, NULL);
		if (regs) {
			/* Success path for user tasks */
			if (user_mode(regs))
				return 0;

			/*
			 * Kernel mode registers on the stack indicate an
			 * in-kernel interrupt or exception (e.g., preemption
			 * or a page fault), which can make frame pointers
			 * unreliable.
			 */

			if (IS_ENABLED(CONFIG_FRAME_POINTER))
				return -EINVAL;
		}

		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's
		 * some generated code which __kernel_text_address() doesn't
		 * know about.
		 */
		if (!addr)
			return -EINVAL;

		if (save_stack_address(trace, addr, false))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;

	/* Success path for non-user tasks, i.e. kthreads and idle tasks */
	if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
		return -EINVAL;

	return 0;
}

/*
 * This function returns an error if it detects any unreliable features of the
 * stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int save_stack_trace_tsk_reliable(struct task_struct *tsk,
				  struct stack_trace *trace)
{
	int ret;

	/*
	 * If the task doesn't have a stack (e.g., a zombie), the stack is
	 * "reliably" empty.
	 */
	if (!try_get_task_stack(tsk))
		return 0;

	ret = __save_stack_trace_reliable(trace, tsk);

	put_task_stack(tsk);

	return ret;
}
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

struct stack_frame_user {
	const void __user *next_fp;
	unsigned long ret_addr;
};

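/*
 * Copy one frame-pointer record from the user stack.  Page faults are
 * disabled so an unmapped address fails fast instead of sleeping;
 * returns 1 on success, 0 on any failure.
 */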
static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
	int ret;

	if (!access_ok(fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

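/*
 * Walk the user stack by chasing saved frame pointers from the task's
 * user-mode register state.  Each step is sanity-checked: the frame must
 * be readable, the frame pointer must not point below the user stack
 * pointer, and a frame that points at itself ends the walk to avoid
 * looping forever.
 */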
static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->bp;

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.ret_addr) {
			trace->entries[trace->nr_entries++] =
				frame.ret_addr;
		}
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}

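/*
 * Public entry point for user-space backtraces.  Note that the walk can
 * only succeed if the traced application was built with frame pointers;
 * otherwise copy_stack_frame() is likely to fail on the first iteration.
 */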
void save_stack_trace_user(struct stack_trace *trace)
{
	/*
	 * Trace user stack if we are not a kernel thread
	 */
	if (current->mm)
		__save_stack_trace_user(trace);
}
Török Edwin02b67512008-11-22 13:28:47 +0200219}