Vineet Gupta | c08098f | 2013-01-18 15:12:21 +0530 | [diff] [blame] | 1 | /* |
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 2 | * stacktrace.c : stacktracing APIs needed by rest of kernel |
| 3 | * (wrappers over ARC dwarf based unwinder) |
| 4 | * |
Vineet Gupta | c08098f | 2013-01-18 15:12:21 +0530 | [diff] [blame] | 5 | * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) |
| 6 | * |
| 7 | * This program is free software; you can redistribute it and/or modify |
| 8 | * it under the terms of the GNU General Public License version 2 as |
| 9 | * published by the Free Software Foundation. |
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 10 | * |
| 11 | * vineetg: aug 2009 |
| 12 | * -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk( ) |
| 13 | * for displaying task's kernel mode call stack in /proc/<pid>/stack |
| 14 | * -Iterator based approach to have single copy of unwinding core and APIs |
| 15 | * needing unwinding, implement the logic in iterator regarding: |
| 16 | * = which frame onwards to start capture |
| 17 | * = which frame to stop capturing (wchan) |
| 18 | * = specifics of data structs where trace is saved(CONFIG_STACKTRACE etc) |
| 19 | * |
| 20 | * vineetg: March 2009 |
| 21 | * -Implemented correct versions of thread_saved_pc() and get_wchan() |
| 22 | * |
| 23 | * rajeshwarr: 2008 |
| 24 | * -Initial implementation |
Vineet Gupta | c08098f | 2013-01-18 15:12:21 +0530 | [diff] [blame] | 25 | */ |
| 26 | |
| 27 | #include <linux/ptrace.h> |
| 28 | #include <linux/export.h> |
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 29 | #include <linux/stacktrace.h> |
| 30 | #include <linux/kallsyms.h> |
| 31 | #include <asm/arcregs.h> |
| 32 | #include <asm/unwind.h> |
| 33 | #include <asm/switch_to.h> |
| 34 | |
| 35 | /*------------------------------------------------------------------------- |
| 36 | * Unwinder Iterator |
| 37 | *------------------------------------------------------------------------- |
| 38 | */ |
| 39 | |
| 40 | #ifdef CONFIG_ARC_DW2_UNWIND |
| 41 | |
static void seed_unwind_frame_info(struct task_struct *tsk,
				   struct pt_regs *regs,
				   struct unwind_frame_info *frame_info)
{
	/*
	 * Prime @frame_info (owning task + initial register state) for the
	 * dwarf unwinder.  Three seeding modes, chosen by (tsk, regs):
	 *   tsk == NULL && regs == NULL : unwind the current context
	 *   regs == NULL                : unwind sleeping task @tsk
	 *   otherwise                   : unwind from intr/excp @regs
	 */

	/*
	 * synchronous unwinding (e.g. dump_stack)
	 * - uses current values of SP and friends
	 */
	if (tsk == NULL && regs == NULL) {
		unsigned long fp, sp, blink, ret;
		frame_info->task = current;

		/* Snapshot the live registers: r27/FP, r28/SP, r31/BLINK
		 * (return address), r63 (seeded as the "ret" PC) per ARC
		 * register conventions.
		 */
		__asm__ __volatile__(
			"mov %0,r27\n\t"
			"mov %1,r28\n\t"
			"mov %2,r31\n\t"
			"mov %3,r63\n\t"
			: "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
		);

		frame_info->regs.r27 = fp;
		frame_info->regs.r28 = sp;
		frame_info->regs.r31 = blink;
		frame_info->regs.r63 = ret;
		frame_info->call_frame = 0;
	} else if (regs == NULL) {
		/*
		 * Asynchronous unwinding of sleeping task
		 * - Gets SP etc from task's pt_regs (saved bottom of kernel
		 *   mode stack of task)
		 */

		frame_info->task = tsk;

		/* Task was switched out in __switch_to: seed from the
		 * callee-reg state saved there (TSK_K_* accessors).
		 */
		frame_info->regs.r27 = TSK_K_FP(tsk);
		frame_info->regs.r28 = TSK_K_ESP(tsk);
		frame_info->regs.r31 = TSK_K_BLINK(tsk);
		frame_info->regs.r63 = (unsigned int)__switch_to;

		/* In the prologue of __switch_to, first FP is saved on stack
		 * and then SP is copied to FP. Dwarf assumes cfa as FP based
		 * but we didn't save FP. The value retrieved above is FP's
		 * state in previous frame.
		 * As a work around for this, we unwind from __switch_to start
		 * and adjust SP accordingly. The other limitation is that
		 * dwarf rules are not generated for the inline assembly in
		 * the __switch_to macro.
		 */
		frame_info->regs.r27 = 0;
		/* 60 presumably accounts for __switch_to's stack frame being
		 * skipped over -- TODO confirm against __switch_to asm.
		 */
		frame_info->regs.r28 += 60;
		frame_info->call_frame = 0;

	} else {
		/*
		 * Asynchronous unwinding of intr/exception
		 * - Just uses the pt_regs passed
		 */
		frame_info->task = tsk;

		frame_info->regs.r27 = regs->fp;
		frame_info->regs.r28 = regs->sp;
		frame_info->regs.r31 = regs->blink;
		frame_info->regs.r63 = regs->ret;
		frame_info->call_frame = 0;
	}
}
| 108 | |
| 109 | #endif |
| 110 | |
Vineet Gupta | 3a51d50 | 2013-07-10 16:03:45 +0200 | [diff] [blame] | 111 | notrace noinline unsigned int |
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 112 | arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs, |
| 113 | int (*consumer_fn) (unsigned int, void *), void *arg) |
| 114 | { |
| 115 | #ifdef CONFIG_ARC_DW2_UNWIND |
| 116 | int ret = 0; |
| 117 | unsigned int address; |
| 118 | struct unwind_frame_info frame_info; |
| 119 | |
| 120 | seed_unwind_frame_info(tsk, regs, &frame_info); |
| 121 | |
| 122 | while (1) { |
| 123 | address = UNW_PC(&frame_info); |
| 124 | |
Vineet Gupta | def32fad | 2015-04-10 14:06:40 +0530 | [diff] [blame] | 125 | if (!address || !__kernel_text_address(address)) |
| 126 | break; |
| 127 | |
| 128 | if (consumer_fn(address, arg) == -1) |
| 129 | break; |
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 130 | |
| 131 | ret = arc_unwind(&frame_info); |
Vineet Gupta | def32fad | 2015-04-10 14:06:40 +0530 | [diff] [blame] | 132 | if (ret) |
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 133 | break; |
Vineet Gupta | def32fad | 2015-04-10 14:06:40 +0530 | [diff] [blame] | 134 | |
| 135 | frame_info.regs.r63 = frame_info.regs.r31; |
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 136 | } |
| 137 | |
| 138 | return address; /* return the last address it saw */ |
| 139 | #else |
| 140 | /* On ARC, only Dward based unwinder works. fp based backtracing is |
| 141 | * not possible (-fno-omit-frame-pointer) because of the way function |
| 142 | * prelogue is setup (callee regs saved and then fp set and not other |
| 143 | * way around |
| 144 | */ |
| 145 | pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n"); |
| 146 | return 0; |
| 147 | |
| 148 | #endif |
| 149 | } |
| 150 | |
| 151 | /*------------------------------------------------------------------------- |
| 152 | * callbacks called by unwinder iterator to implement kernel APIs |
| 153 | * |
| 154 | * The callback can return -1 to force the iterator to stop, which by default |
| 155 | * keeps going till the bottom-most frame. |
| 156 | *------------------------------------------------------------------------- |
| 157 | */ |
| 158 | |
/* Call-back which plugs into unwinding core to dump the stack in
 * case of panic/OOPs/BUG etc
 */
static int __print_sym(unsigned int address, void *unused)
{
	/* resolve @address to "symbol+offset" via kallsyms and print it */
	__print_symbol(" %s\n", address);
	return 0;	/* 0 = keep unwinding; this callback never stops early */
}
| 167 | |
| 168 | #ifdef CONFIG_STACKTRACE |
| 169 | |
| 170 | /* Call-back which plugs into unwinding core to capture the |
| 171 | * traces needed by kernel on /proc/<pid>/stack |
| 172 | */ |
| 173 | static int __collect_all(unsigned int address, void *arg) |
| 174 | { |
| 175 | struct stack_trace *trace = arg; |
| 176 | |
| 177 | if (trace->skip > 0) |
| 178 | trace->skip--; |
| 179 | else |
| 180 | trace->entries[trace->nr_entries++] = address; |
| 181 | |
| 182 | if (trace->nr_entries >= trace->max_entries) |
| 183 | return -1; |
| 184 | |
| 185 | return 0; |
| 186 | } |
| 187 | |
| 188 | static int __collect_all_but_sched(unsigned int address, void *arg) |
| 189 | { |
| 190 | struct stack_trace *trace = arg; |
| 191 | |
| 192 | if (in_sched_functions(address)) |
| 193 | return 0; |
| 194 | |
| 195 | if (trace->skip > 0) |
| 196 | trace->skip--; |
| 197 | else |
| 198 | trace->entries[trace->nr_entries++] = address; |
| 199 | |
| 200 | if (trace->nr_entries >= trace->max_entries) |
| 201 | return -1; |
| 202 | |
| 203 | return 0; |
| 204 | } |
| 205 | |
| 206 | #endif |
| 207 | |
/* Call-back: keep walking (0) while inside scheduler code, stop (-1) at the
 * first address outside it -- that address is the task's wait channel.
 */
static int __get_first_nonsched(unsigned int address, void *unused)
{
	return in_sched_functions(address) ? 0 : -1;
}
Vineet Gupta | c08098f | 2013-01-18 15:12:21 +0530 | [diff] [blame] | 215 | |
| 216 | /*------------------------------------------------------------------------- |
| 217 | * APIs expected by various kernel sub-systems |
| 218 | *------------------------------------------------------------------------- |
| 219 | */ |
| 220 | |
/* Dump the kernel mode call stack of @tsk (or of the context in @regs, or
 * of the current context if both are NULL) to the kernel log, one resolved
 * symbol per line.
 */
noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs)
{
	pr_info("\nStack Trace:\n");
	arc_unwind_core(tsk, regs, __print_sym, NULL);
}
EXPORT_SYMBOL(show_stacktrace);
| 227 | |
/* Expected by sched Code */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	/* the @sp hint is ignored; unwinding is seeded from @tsk state */
	show_stacktrace(tsk, NULL);
}
| 233 | |
Vineet Gupta | c08098f | 2013-01-18 15:12:21 +0530 | [diff] [blame] | 234 | /* Another API expected by schedular, shows up in "ps" as Wait Channel |
Adam Buchbinder | 7423cc0 | 2016-02-23 15:24:55 -0800 | [diff] [blame] | 235 | * Of course just returning schedule( ) would be pointless so unwind until |
Vineet Gupta | c08098f | 2013-01-18 15:12:21 +0530 | [diff] [blame] | 236 | * the function is not in schedular code |
| 237 | */ |
| 238 | unsigned int get_wchan(struct task_struct *tsk) |
| 239 | { |
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 240 | return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL); |
Vineet Gupta | c08098f | 2013-01-18 15:12:21 +0530 | [diff] [blame] | 241 | } |
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 242 | |
| 243 | #ifdef CONFIG_STACKTRACE |
| 244 | |
| 245 | /* |
| 246 | * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP. |
| 247 | * A typical use is when /proc/<pid>/stack is queried by userland |
| 248 | */ |
| 249 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) |
| 250 | { |
Vineet Gupta | 0dafafc | 2013-09-06 14:18:17 +0530 | [diff] [blame] | 251 | /* Assumes @tsk is sleeping so unwinds from __switch_to */ |
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 252 | arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace); |
| 253 | } |
| 254 | |
/* Capture the current context's kernel call stack into @trace (all frames,
 * including scheduler code).
 */
void save_stack_trace(struct stack_trace *trace)
{
	/* Pass NULL for task so it unwinds the current call frame */
	arc_unwind_core(NULL, NULL, __collect_all, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 261 | #endif |