/*
 * Stack trace management functions
 *
 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

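/*
 * Record a single return address in the trace buffer.  Honours the
 * caller-supplied skip count and, when @nosched is set, filters out
 * addresses inside scheduler functions.  Returns -1 once the buffer is
 * full so the unwind loop can stop early.
 */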
static int save_stack_address(struct stack_trace *trace, unsigned long addr,
			      bool nosched)
{
	if (nosched && in_sched_functions(addr))
		return 0;

	if (trace->skip > 0) {
		trace->skip--;
		return 0;
	}

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	trace->entries[trace->nr_entries++] = addr;
	return 0;
}

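/*
 * Core unwind loop: walk the stack of @task (or start from @regs when
 * given), saving each return address until the unwinder finishes or
 * save_stack_address() reports a full buffer.  The trace is terminated
 * with ULONG_MAX when space remains.
 */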
static void noinline __save_stack_trace(struct stack_trace *trace,
			       struct task_struct *task, struct pt_regs *regs,
			       bool nosched)
{
	struct unwind_state state;
	unsigned long addr;

	if (regs)
		save_stack_address(trace, regs->ip, nosched);

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || save_stack_address(trace, addr, nosched))
			break;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

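/*
 * Typical caller setup (a sketch, not part of this file): the caller
 * provides the entry buffer and its limit via struct stack_trace, e.g.
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries	= entries,
 *		.max_entries	= ARRAY_SIZE(entries),
 *	};
 *	save_stack_trace(&trace);
 */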
/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
	trace->skip++;
	__save_stack_trace(trace, current, NULL, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	__save_stack_trace(trace, current, regs, false);
}

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	if (!try_get_task_stack(tsk))
		return;

	if (tsk == current)
		trace->skip++;
	__save_stack_trace(trace, tsk, NULL, true);

	put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE

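/*
 * Walk the stack of @task and save return addresses, but bail out with
 * -EINVAL as soon as anything looks unreliable: kernel-mode pt_regs on
 * the stack, an unknown return address, a full buffer, an unwinder
 * error, or a user task whose stack does not end in a user-mode frame.
 */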
static int __always_inline
__save_stack_trace_reliable(struct stack_trace *trace,
			    struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs *regs;
	unsigned long addr;

	for (unwind_start(&state, task, NULL, NULL);
	     !unwind_done(&state) && !unwind_error(&state);
	     unwind_next_frame(&state)) {

		regs = unwind_get_entry_regs(&state, NULL);
		if (regs) {
			/* Success path for user tasks */
			if (user_mode(regs))
				goto success;

			/*
			 * Kernel mode registers on the stack indicate an
			 * in-kernel interrupt or exception (e.g., preemption
			 * or a page fault), which can make frame pointers
			 * unreliable.
			 */

			return -EINVAL;
		}

		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's some
		 * generated code which __kernel_text_address() doesn't know
		 * about.
		 */
		if (!addr)
			return -EINVAL;

		if (save_stack_address(trace, addr, false))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;

	/* Success path for non-user tasks, i.e. kthreads and idle tasks */
	if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
		return -EINVAL;

success:
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;

	return 0;
}

/*
 * This function returns an error if it detects any unreliable features of the
 * stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int save_stack_trace_tsk_reliable(struct task_struct *tsk,
				  struct stack_trace *trace)
{
	int ret;

	/*
	 * If the task doesn't have a stack (e.g., a zombie), the stack is
	 * "reliably" empty.
	 */
	if (!try_get_task_stack(tsk))
		return 0;

	ret = __save_stack_trace_reliable(trace, tsk);

	put_task_stack(tsk);

	return ret;
}
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

struct stack_frame_user {
	const void __user	*next_fp;
	unsigned long		ret_addr;
};

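/*
 * Safely copy one saved frame-pointer/return-address pair from the
 * user stack.  Returns 1 on success, 0 if the frame cannot be read.
 */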
static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

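/*
 * Walk the user-space frame-pointer chain starting from the saved
 * pt_regs of the current task.  The walk stops on an unreadable frame,
 * a frame pointer below the stack pointer, a self-referencing frame,
 * or a full trace buffer.
 */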
static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->bp;

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.ret_addr) {
			trace->entries[trace->nr_entries++] =
				frame.ret_addr;
		}
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}

void save_stack_trace_user(struct stack_trace *trace)
{
	/*
	 * Trace user stack if we are not a kernel thread
	 */
	if (current->mm) {
		__save_stack_trace_user(trace);
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}