Vineet Gupta | c08098f | 2013-01-18 15:12:21 +0530 | [diff] [blame] | 1 | /* |
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 2 | * stacktrace.c : stacktracing APIs needed by rest of kernel |
| 3 | * (wrappers over ARC dwarf based unwinder) |
| 4 | * |
Vineet Gupta | c08098f | 2013-01-18 15:12:21 +0530 | [diff] [blame] | 5 | * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) |
| 6 | * |
| 7 | * This program is free software; you can redistribute it and/or modify |
| 8 | * it under the terms of the GNU General Public License version 2 as |
| 9 | * published by the Free Software Foundation. |
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 10 | * |
| 11 | * vineetg: aug 2009 |
| 12 | * -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk( ) |
| 13 | * for displaying task's kernel mode call stack in /proc/<pid>/stack |
| 14 | * -Iterator based approach to have single copy of unwinding core and APIs |
| 15 | * needing unwinding, implement the logic in iterator regarding: |
| 16 | * = which frame onwards to start capture |
| 17 | * = which frame to stop capturing (wchan) |
| 18 | * = specifics of data structs where trace is saved(CONFIG_STACKTRACE etc) |
| 19 | * |
| 20 | * vineetg: March 2009 |
| 21 | * -Implemented correct versions of thread_saved_pc() and get_wchan() |
| 22 | * |
| 23 | * rajeshwarr: 2008 |
| 24 | * -Initial implementation |
Vineet Gupta | c08098f | 2013-01-18 15:12:21 +0530 | [diff] [blame] | 25 | */ |
| 26 | |
| 27 | #include <linux/ptrace.h> |
| 28 | #include <linux/export.h> |
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 29 | #include <linux/stacktrace.h> |
| 30 | #include <linux/kallsyms.h> |
| 31 | #include <asm/arcregs.h> |
| 32 | #include <asm/unwind.h> |
| 33 | #include <asm/switch_to.h> |
| 34 | |
| 35 | /*------------------------------------------------------------------------- |
| 36 | * Unwinder Iterator |
| 37 | *------------------------------------------------------------------------- |
| 38 | */ |
| 39 | |
| 40 | #ifdef CONFIG_ARC_DW2_UNWIND |
| 41 | |
/*
 * Prime @frame_info with the register state the DWARF unwinder starts from.
 * Three seeding modes, selected by the (tsk, regs) combination:
 *  - tsk == NULL && regs == NULL: trace the CURRENT task from right here;
 *    snapshot fp/sp/blink/pc live via inline asm (r27/r28/r31/r63).
 *  - regs == NULL: trace a sleeping @tsk from its last-saved kernel state,
 *    i.e. the context stashed when it was switched out in __switch_to.
 *  - otherwise: trace from an explicit exception/interrupt snapshot @regs.
 */
static void seed_unwind_frame_info(struct task_struct *tsk,
				   struct pt_regs *regs,
				   struct unwind_frame_info *frame_info)
{
	if (tsk == NULL && regs == NULL) {
		unsigned long fp, sp, blink, ret;
		frame_info->task = current;

		/* capture this very call site's fp/sp/blink/pc */
		__asm__ __volatile__(
			"mov %0,r27\n\t"
			"mov %1,r28\n\t"
			"mov %2,r31\n\t"
			"mov %3,r63\n\t"
			: "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
		);

		frame_info->regs.r27 = fp;
		frame_info->regs.r28 = sp;
		frame_info->regs.r31 = blink;
		frame_info->regs.r63 = ret;
		frame_info->call_frame = 0;
	} else if (regs == NULL) {

		frame_info->task = tsk;

		/* sleeping task: registers as saved at context-switch time */
		frame_info->regs.r27 = KSTK_FP(tsk);
		frame_info->regs.r28 = KSTK_ESP(tsk);
		frame_info->regs.r31 = KSTK_BLINK(tsk);
		frame_info->regs.r63 = (unsigned int)__switch_to;

		/* In the prologue of __switch_to, first FP is saved on stack
		 * and then SP is copied to FP. Dwarf assumes cfa as FP based
		 * but we didn't save FP. The value retrieved above is FP's
		 * state in previous frame.
		 * As a workaround, pretend we unwind from the start of
		 * __switch_to (FP zeroed, SP adjusted past its frame).
		 * The other limitation is that DWARF rules are not generated
		 * for the inline-assembly portion of the __switch_to code.
		 */
		frame_info->regs.r27 = 0;
		/* 60 = presumably the size of __switch_to's register save
		 * area on the stack — TODO confirm against the entry code */
		frame_info->regs.r28 += 60;
		frame_info->call_frame = 0;

	} else {
		/* explicit snapshot (e.g. from an exception frame) */
		frame_info->task = tsk;

		frame_info->regs.r27 = regs->fp;
		frame_info->regs.r28 = regs->sp;
		frame_info->regs.r31 = regs->blink;
		frame_info->regs.r63 = regs->ret;
		frame_info->call_frame = 0;
	}
}
| 95 | |
| 96 | #endif |
| 97 | |
| 98 | static noinline unsigned int |
| 99 | arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs, |
| 100 | int (*consumer_fn) (unsigned int, void *), void *arg) |
| 101 | { |
| 102 | #ifdef CONFIG_ARC_DW2_UNWIND |
| 103 | int ret = 0; |
| 104 | unsigned int address; |
| 105 | struct unwind_frame_info frame_info; |
| 106 | |
| 107 | seed_unwind_frame_info(tsk, regs, &frame_info); |
| 108 | |
| 109 | while (1) { |
| 110 | address = UNW_PC(&frame_info); |
| 111 | |
| 112 | if (address && __kernel_text_address(address)) { |
| 113 | if (consumer_fn(address, arg) == -1) |
| 114 | break; |
| 115 | } |
| 116 | |
| 117 | ret = arc_unwind(&frame_info); |
| 118 | |
| 119 | if (ret == 0) { |
| 120 | frame_info.regs.r63 = frame_info.regs.r31; |
| 121 | continue; |
| 122 | } else { |
| 123 | break; |
| 124 | } |
| 125 | } |
| 126 | |
| 127 | return address; /* return the last address it saw */ |
| 128 | #else |
| 129 | /* On ARC, only Dward based unwinder works. fp based backtracing is |
| 130 | * not possible (-fno-omit-frame-pointer) because of the way function |
| 131 | * prelogue is setup (callee regs saved and then fp set and not other |
| 132 | * way around |
| 133 | */ |
| 134 | pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n"); |
| 135 | return 0; |
| 136 | |
| 137 | #endif |
| 138 | } |
| 139 | |
| 140 | /*------------------------------------------------------------------------- |
| 141 | * callbacks called by unwinder iterator to implement kernel APIs |
| 142 | * |
| 143 | * The callback can return -1 to force the iterator to stop, which by default |
| 144 | * keeps going till the bottom-most frame. |
| 145 | *------------------------------------------------------------------------- |
| 146 | */ |
| 147 | |
/*
 * Unwinder callback: print the symbol for one frame's PC, used when
 * dumping the stack on panic/OOPs/BUG. Always returns 0 so every
 * frame down to the bottom of the stack gets printed.
 */
static int __print_sym(unsigned int pc, void *unused)
{
	__print_symbol(" %s\n", pc);
	return 0;
}
| 156 | |
| 157 | #ifdef CONFIG_STACKTRACE |
| 158 | |
| 159 | /* Call-back which plugs into unwinding core to capture the |
| 160 | * traces needed by kernel on /proc/<pid>/stack |
| 161 | */ |
| 162 | static int __collect_all(unsigned int address, void *arg) |
| 163 | { |
| 164 | struct stack_trace *trace = arg; |
| 165 | |
| 166 | if (trace->skip > 0) |
| 167 | trace->skip--; |
| 168 | else |
| 169 | trace->entries[trace->nr_entries++] = address; |
| 170 | |
| 171 | if (trace->nr_entries >= trace->max_entries) |
| 172 | return -1; |
| 173 | |
| 174 | return 0; |
| 175 | } |
| 176 | |
| 177 | static int __collect_all_but_sched(unsigned int address, void *arg) |
| 178 | { |
| 179 | struct stack_trace *trace = arg; |
| 180 | |
| 181 | if (in_sched_functions(address)) |
| 182 | return 0; |
| 183 | |
| 184 | if (trace->skip > 0) |
| 185 | trace->skip--; |
| 186 | else |
| 187 | trace->entries[trace->nr_entries++] = address; |
| 188 | |
| 189 | if (trace->nr_entries >= trace->max_entries) |
| 190 | return -1; |
| 191 | |
| 192 | return 0; |
| 193 | } |
| 194 | |
| 195 | #endif |
| 196 | |
/*
 * Unwinder callback for get_wchan(): keep walking (return 0) while
 * inside scheduler code, halt (-1) at the first non-scheduler frame —
 * arc_unwind_core() then returns that PC as the wait channel.
 */
static int __get_first_nonsched(unsigned int pc, void *unused)
{
	return in_sched_functions(pc) ? 0 : -1;
}
Vineet Gupta | c08098f | 2013-01-18 15:12:21 +0530 | [diff] [blame] | 204 | |
| 205 | /*------------------------------------------------------------------------- |
| 206 | * APIs expected by various kernel sub-systems |
| 207 | *------------------------------------------------------------------------- |
| 208 | */ |
| 209 | |
/* Dump a symbolic call stack for @tsk (or the current context when both
 * @tsk and @regs are NULL) to the kernel log — used on panic/OOPs/BUG.
 */
noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs)
{
	pr_info("\nStack Trace:\n");
	arc_unwind_core(tsk, regs, __print_sym, NULL);
}
EXPORT_SYMBOL(show_stacktrace);
| 216 | |
/* Expected by sched Code */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	/* @sp is ignored; the unwinder seeds itself from @tsk's saved state */
	show_stacktrace(tsk, NULL);
}
| 222 | |
Vineet Gupta | c08098f | 2013-01-18 15:12:21 +0530 | [diff] [blame] | 223 | /* Another API expected by schedular, shows up in "ps" as Wait Channel |
| 224 | * Ofcourse just returning schedule( ) would be pointless so unwind until |
| 225 | * the function is not in schedular code |
| 226 | */ |
| 227 | unsigned int get_wchan(struct task_struct *tsk) |
| 228 | { |
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 229 | return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL); |
Vineet Gupta | c08098f | 2013-01-18 15:12:21 +0530 | [diff] [blame] | 230 | } |
Vineet Gupta | 44c8bb9 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 231 | |
#ifdef CONFIG_STACKTRACE

/*
 * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
 * A typical use is when /proc/<pid>/stack is queried by userland
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	/* Assumes @tsk is sleeping so unwinds from __switch_to */
	arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
}

/* Capture the current context's own call stack into @trace. */
void save_stack_trace(struct stack_trace *trace)
{
	/* Pass NULL for task so it unwinds the current call frame */
	arc_unwind_core(NULL, NULL, __collect_all, trace);
}
#endif