/*
 * Stack trace management functions
 *
 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>

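/*
 * The .stack callback is invoked whenever the unwinder crosses onto a
 * different stack (e.g. an IRQ or exception stack).  We record nothing
 * at stack boundaries, so return 0 to let the walk continue.
 */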
static int save_stack_stack(void *data, const char *name)
{
	return 0;
}

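/*
 * Common .address callback: filter one return address and store it in
 * the stack_trace buffer.  With CONFIG_FRAME_POINTER, addresses the
 * unwinder could not verify against the frame-pointer chain are
 * dropped; @nosched additionally drops addresses inside scheduler
 * functions.  The first trace->skip addresses are consumed without
 * being recorded.  Returns 0 to continue the walk, -1 once the entry
 * buffer is full.
 */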
static int
__save_stack_address(void *data, unsigned long addr, bool reliable, bool nosched)
{
	struct stack_trace *trace = data;
#ifdef CONFIG_FRAME_POINTER
	if (!reliable)
		return 0;
#endif
	if (nosched && in_sched_functions(addr))
		return 0;
	if (trace->skip > 0) {
		trace->skip--;
		return 0;
	}
	if (trace->nr_entries < trace->max_entries) {
		trace->entries[trace->nr_entries++] = addr;
		return 0;
	} else {
		return -1; /* no more room, stop walking the stack */
	}
}

static int save_stack_address(void *data, unsigned long addr, int reliable)
{
	return __save_stack_address(data, addr, reliable, false);
}

static int
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
	return __save_stack_address(data, addr, reliable, true);
}

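/*
 * Callback tables handed to dump_trace(): print_context_stack() does
 * the actual frame walk and invokes the .address callback for every
 * kernel text address it finds.
 */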
static const struct stacktrace_ops save_stack_ops = {
	.stack		= save_stack_stack,
	.address	= save_stack_address,
	.walk_stack	= print_context_stack,
};

static const struct stacktrace_ops save_stack_ops_nosched = {
	.stack		= save_stack_stack,
	.address	= save_stack_address_nosched,
	.walk_stack	= print_context_stack,
};

/*
 * Save stack-backtrace addresses into a stack_trace buffer,
 * terminated with ULONG_MAX if there is room left.
 */
void save_stack_trace(struct stack_trace *trace)
{
	dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
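
/*
 * Illustrative caller sketch (not part of this file; the buffer size
 * and skip count below are arbitrary):
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries	= entries,
 *		.max_entries	= ARRAY_SIZE(entries),
 *		.skip		= 0,
 *	};
 *
 *	save_stack_trace(&trace);
 */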

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	dump_trace(current, regs, NULL, 0, &save_stack_ops, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

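/*
 * Like save_stack_trace(), but for an arbitrary task.  The task's stack
 * may be freed while we walk it, so pin it with try_get_task_stack()
 * first and drop the reference when done.  Scheduler internals are
 * filtered out via the _nosched ops, so the trace shows where @tsk
 * went to sleep rather than the scheduler's own frames.
 */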
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	if (!try_get_task_stack(tsk))
		return;

	dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;

	put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

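/*
 * Mirrors a userspace stack frame built under the frame-pointer
 * convention: the caller's saved frame pointer, followed by the
 * return address.
 */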
struct stack_frame_user {
	const void __user	*next_fp;
	unsigned long		ret_addr;
};

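/*
 * Copy one frame from the user stack without risking a sleep: probe the
 * address range first, then do the copy with page faults disabled so a
 * fault fails fast instead of being handled.  Returns 1 on success,
 * 0 on failure.
 */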
static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

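/*
 * Walk the user-mode frame-pointer chain, starting from the current
 * task's saved user registers.  The walk stops when the entry buffer
 * is full, a frame cannot be read, the chain drops below the user
 * stack pointer, or the chain loops back on itself.
 */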
static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->bp;

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.ret_addr)
			trace->entries[trace->nr_entries++] = frame.ret_addr;
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}

void save_stack_trace_user(struct stack_trace *trace)
{
	/*
	 * Only trace the user stack if this is not a kernel thread
	 * (kernel threads have no mm and thus no user context).
	 */
	if (current->mm)
		__save_stack_trace_user(trace);

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}