/*
 * Stack trace management functions
 *
 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

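/*
 * Walk the kernel stack of @task, or the stack described by @regs when
 * @regs is non-NULL, and hand each return address to @consume_entry.
 * The walk stops when the unwinder is done or when the callback returns
 * false.
 */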
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	if (regs && !consume_entry(cookie, regs->ip, false))
		return;

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr, false))
			break;
	}
}
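
/*
 * For illustration only, not part of the kernel sources: a hypothetical
 * consume_entry callback could collect addresses into a fixed-size
 * buffer and stop the walk by returning false once it is full.  All
 * names in this sketch are made up for the example.
 *
 *	struct example_cookie {
 *		unsigned long *entries;
 *		unsigned int max, nr;
 *	};
 *
 *	static bool example_consume(void *cookie, unsigned long addr,
 *				    bool reliable)
 *	{
 *		struct example_cookie *c = cookie;
 *
 *		c->entries[c->nr++] = addr;
 *		return c->nr < c->max;
 *	}
 */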

/*
 * This function returns an error if it detects any unreliable features of the
 * stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs *regs;
	unsigned long addr;

	for (unwind_start(&state, task, NULL, NULL);
	     !unwind_done(&state) && !unwind_error(&state);
	     unwind_next_frame(&state)) {

		regs = unwind_get_entry_regs(&state, NULL);
		if (regs) {
			/* Success path for user tasks */
			if (user_mode(regs))
				return 0;

			/*
			 * Kernel mode registers on the stack indicate an
			 * in-kernel interrupt or exception (e.g., preemption
			 * or a page fault), which can make frame pointers
			 * unreliable.
			 */
			if (IS_ENABLED(CONFIG_FRAME_POINTER))
				return -EINVAL;
		}

		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's
		 * some generated code which __kernel_text_address() doesn't
		 * know about.
		 */
		if (!addr)
			return -EINVAL;

		if (!consume_entry(cookie, addr, false))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;

	/*
	 * Success path for non-user tasks, i.e. kthreads and idle tasks.
	 * A user task should have ended the walk at its user-mode pt_regs
	 * frame above; reaching this point means its stack is inconsistent.
	 */
	if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
		return -EINVAL;

	return 0;
}

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

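/*
 * With frame pointers enabled, a userspace stack frame begins with the
 * caller's saved frame pointer followed by the return address, so the
 * pair below can be copied straight off the user stack.
 */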
struct stack_frame_user {
	const void __user *next_fp;
	unsigned long ret_addr;
};

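/*
 * Copy one stack_frame_user from the user stack at @fp.  Page faults are
 * disabled, so this is safe in non-sleeping context; returns 1 on success
 * and 0 if the user memory could not be accessed.
 */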
static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
	int ret;

	if (!access_ok(fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

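/*
 * Walk the user stack by following the frame pointer chain, starting
 * from the register state in @regs.  Each iteration sanity-checks the
 * frame pointer: it must not point below the current stack pointer and
 * must make progress, otherwise the walk terminates.
 */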
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	const void __user *fp = (const void __user *)regs->bp;

	if (!consume_entry(cookie, regs->ip, false))
		return;

	while (1) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.ret_addr) {
			if (!consume_entry(cookie, frame.ret_addr, false))
				return;
		}
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}