Thomas Gleixner | d2912cb | 2019-06-04 10:11:33 +0200 | [diff] [blame^] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
Vineet Gupta | c08098f | 2013-01-18 15:12:21 +0530 | [diff] [blame] | 2 | /* |
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 3 | * stacktrace.c : stacktracing APIs needed by rest of kernel |
| 4 | * (wrappers over ARC dwarf based unwinder) |
| 5 | * |
Vineet Gupta | c08098f | 2013-01-18 15:12:21 +0530 | [diff] [blame] | 6 | * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) |
| 7 | * |
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 8 | * vineetg: aug 2009 |
| 9 | * -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk( ) |
| 10 | * for displaying task's kernel mode call stack in /proc/<pid>/stack |
| 11 | * -Iterator based approach to have single copy of unwinding core and APIs |
| 12 | * needing unwinding, implement the logic in iterator regarding: |
| 13 | * = which frame onwards to start capture |
| 14 | * = which frame to stop capturing (wchan) |
| 15 | * = specifics of data structs where trace is saved(CONFIG_STACKTRACE etc) |
| 16 | * |
| 17 | * vineetg: March 2009 |
| 18 | * -Implemented correct versions of thread_saved_pc() and get_wchan() |
| 19 | * |
| 20 | * rajeshwarr: 2008 |
| 21 | * -Initial implementation |
Vineet Gupta | c08098f | 2013-01-18 15:12:21 +0530 | [diff] [blame] | 22 | */ |
| 23 | |
| 24 | #include <linux/ptrace.h> |
| 25 | #include <linux/export.h> |
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 26 | #include <linux/stacktrace.h> |
| 27 | #include <linux/kallsyms.h> |
Ingo Molnar | b17b015 | 2017-02-08 18:51:35 +0100 | [diff] [blame] | 28 | #include <linux/sched/debug.h> |
| 29 | |
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 30 | #include <asm/arcregs.h> |
| 31 | #include <asm/unwind.h> |
| 32 | #include <asm/switch_to.h> |
| 33 | |
| 34 | /*------------------------------------------------------------------------- |
| 35 | * Unwinder Iterator |
| 36 | *------------------------------------------------------------------------- |
| 37 | */ |
| 38 | |
| 39 | #ifdef CONFIG_ARC_DW2_UNWIND |
| 40 | |
/*
 * Seed the dwarf unwinder with an initial register state (FP, SP, BLINK, PC)
 * from which to start unwinding.  Three cases, keyed off @tsk/@regs:
 *   - tsk == NULL && regs == NULL: unwind the current in-flight context
 *   - regs == NULL:                unwind a sleeping task @tsk
 *   - otherwise:                   unwind from the intr/exception @regs
 */
static void seed_unwind_frame_info(struct task_struct *tsk,
				   struct pt_regs *regs,
				   struct unwind_frame_info *frame_info)
{
	/*
	 * synchronous unwinding (e.g. dump_stack)
	 * - uses current values of SP and friends
	 */
	if (tsk == NULL && regs == NULL) {
		unsigned long fp, sp, blink, ret;
		frame_info->task = current;

		/* Snapshot the live core regs: fp from r27, sp from r28,
		 * blink (return address) from r31, and pc from r63.
		 */
		__asm__ __volatile__(
			"mov %0,r27\n\t"
			"mov %1,r28\n\t"
			"mov %2,r31\n\t"
			"mov %3,r63\n\t"
			: "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
		);

		frame_info->regs.r27 = fp;
		frame_info->regs.r28 = sp;
		frame_info->regs.r31 = blink;
		frame_info->regs.r63 = ret;
		frame_info->call_frame = 0;
	} else if (regs == NULL) {
		/*
		 * Asynchronous unwinding of sleeping task
		 * - Gets SP etc from task's pt_regs (saved bottom of kernel
		 *   mode stack of task)
		 */

		frame_info->task = tsk;

		frame_info->regs.r27 = TSK_K_FP(tsk);
		frame_info->regs.r28 = TSK_K_ESP(tsk);
		frame_info->regs.r31 = TSK_K_BLINK(tsk);
		/* Start unwinding as if stopped at __switch_to's entry */
		frame_info->regs.r63 = (unsigned int)__switch_to;

		/* In the prologue of __switch_to, first FP is saved on stack
		 * and then SP is copied to FP. Dwarf assumes cfa as FP based
		 * but we didn't save FP. The value retrieved above is FP's
		 * state in previous frame.
		 * As a work around for this, we unwind from __switch_to start
		 * and adjust SP accordingly. The other limitation is that
		 * dwarf rules are not generated for the inline assembly in
		 * the __switch_to macro.
		 */
		/* NOTE(review): this makes the TSK_K_FP() store above a dead
		 * store; kept as-is since the workaround unwinds from
		 * __switch_to's start where FP is not yet set up.
		 */
		frame_info->regs.r27 = 0;
		/* skip over __switch_to's stack frame; presumably its saved
		 * callee-reg area is 60 bytes -- TODO confirm against entry
		 * code if that frame layout ever changes
		 */
		frame_info->regs.r28 += 60;
		frame_info->call_frame = 0;

	} else {
		/*
		 * Asynchronous unwinding of intr/exception
		 * - Just uses the pt_regs passed
		 */
		frame_info->task = tsk;

		frame_info->regs.r27 = regs->fp;
		frame_info->regs.r28 = regs->sp;
		frame_info->regs.r31 = regs->blink;
		frame_info->regs.r63 = regs->ret;
		frame_info->call_frame = 0;
	}
}
| 107 | |
| 108 | #endif |
| 109 | |
/*
 * Core unwinding iterator: walks the kernel-mode call stack described by
 * @tsk / @regs (see seed_unwind_frame_info for the three seeding modes) and
 * hands each PC found to @consumer_fn along with the opaque @arg.
 *
 * @consumer_fn returns -1 to stop the walk early; otherwise iteration
 * continues until the address leaves kernel text or the unwinder fails.
 * Returns the last address seen (0 if the unwinder isn't configured in).
 */
notrace noinline unsigned int
arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
		int (*consumer_fn) (unsigned int, void *), void *arg)
{
#ifdef CONFIG_ARC_DW2_UNWIND
	int ret = 0;
	unsigned int address;
	struct unwind_frame_info frame_info;

	seed_unwind_frame_info(tsk, regs, &frame_info);

	while (1) {
		address = UNW_PC(&frame_info);

		/* stop once we unwind out of kernel text */
		if (!address || !__kernel_text_address(address))
			break;

		if (consumer_fn(address, arg) == -1)
			break;

		ret = arc_unwind(&frame_info);
		if (ret)
			break;

		/* next frame's PC is this frame's return address (blink) */
		frame_info.regs.r63 = frame_info.regs.r31;
	}

	return address;		/* return the last address it saw */
#else
	/* On ARC, only Dwarf based unwinder works. fp based backtracing is
	 * not possible (-fno-omit-frame-pointer) because of the way function
	 * prologue is setup (callee regs saved and then fp set, not the
	 * other way around)
	 */
	pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
	return 0;

#endif
}
| 149 | |
| 150 | /*------------------------------------------------------------------------- |
| 151 | * callbacks called by unwinder iterator to implement kernel APIs |
| 152 | * |
| 153 | * The callback can return -1 to force the iterator to stop, which by default |
| 154 | * keeps going till the bottom-most frame. |
| 155 | *------------------------------------------------------------------------- |
| 156 | */ |
| 157 | |
/* Unwind-core callback: emit one symbolized frame for the stack dump
 * printed on panic/OOPs/BUG etc.
 */
static int __print_sym(unsigned int addr, void *unused)
{
	printk(" %pS\n", (void *)addr);

	/* 0 => keep unwinding down to the bottom-most frame */
	return 0;
}
| 166 | |
| 167 | #ifdef CONFIG_STACKTRACE |
| 168 | |
| 169 | /* Call-back which plugs into unwinding core to capture the |
| 170 | * traces needed by kernel on /proc/<pid>/stack |
| 171 | */ |
| 172 | static int __collect_all(unsigned int address, void *arg) |
| 173 | { |
| 174 | struct stack_trace *trace = arg; |
| 175 | |
| 176 | if (trace->skip > 0) |
| 177 | trace->skip--; |
| 178 | else |
| 179 | trace->entries[trace->nr_entries++] = address; |
| 180 | |
| 181 | if (trace->nr_entries >= trace->max_entries) |
| 182 | return -1; |
| 183 | |
| 184 | return 0; |
| 185 | } |
| 186 | |
| 187 | static int __collect_all_but_sched(unsigned int address, void *arg) |
| 188 | { |
| 189 | struct stack_trace *trace = arg; |
| 190 | |
| 191 | if (in_sched_functions(address)) |
| 192 | return 0; |
| 193 | |
| 194 | if (trace->skip > 0) |
| 195 | trace->skip--; |
| 196 | else |
| 197 | trace->entries[trace->nr_entries++] = address; |
| 198 | |
| 199 | if (trace->nr_entries >= trace->max_entries) |
| 200 | return -1; |
| 201 | |
| 202 | return 0; |
| 203 | } |
| 204 | |
| 205 | #endif |
| 206 | |
/* Unwind-core callback for get_wchan(): keep walking (0) while still in
 * scheduler code, stop (-1) at the first PC outside it -- which
 * arc_unwind_core() then returns as the wait channel.
 */
static int __get_first_nonsched(unsigned int address, void *unused)
{
	return in_sched_functions(address) ? 0 : -1;
}
Vineet Gupta | c08098f | 2013-01-18 15:12:21 +0530 | [diff] [blame] | 214 | |
| 215 | /*------------------------------------------------------------------------- |
| 216 | * APIs expected by various kernel sub-systems |
| 217 | *------------------------------------------------------------------------- |
| 218 | */ |
| 219 | |
/*
 * Print the kernel-mode call stack of @tsk (or of the @regs context) to the
 * kernel log, one "%pS"-symbolized line per frame.  Used by oops/panic
 * paths via show_stack().
 */
noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs)
{
	pr_info("\nStack Trace:\n");
	arc_unwind_core(tsk, regs, __print_sym, NULL);
}
EXPORT_SYMBOL(show_stacktrace);
| 226 | |
/* Expected by sched Code */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	/* @sp is ignored: the unwinder seeds itself from @tsk (or current) */
	show_stacktrace(tsk, NULL);
}
| 232 | |
/* Another API expected by scheduler, shows up in "ps" as Wait Channel
 * Of course just returning schedule( ) would be pointless so unwind until
 * the function is not in scheduler code
 */
unsigned int get_wchan(struct task_struct *tsk)
{
	/* returns the first non-scheduler PC on @tsk's stack (0 on failure) */
	return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
}
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 241 | |
| 242 | #ifdef CONFIG_STACKTRACE |
| 243 | |
/*
 * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
 * A typical use is when /proc/<pid>/stack is queried by userland.
 * Captures @tsk's kernel call stack into @trace, skipping scheduler frames.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	/* Assumes @tsk is sleeping so unwinds from __switch_to */
	arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
}
| 253 | |
/* Capture the current context's own call stack into @trace */
void save_stack_trace(struct stack_trace *trace)
{
	/* Pass NULL for task so it unwinds the current call frame */
	arc_unwind_core(NULL, NULL, __collect_all, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 260 | #endif |