// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/sort.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/orc_types.h>
#include <asm/orc_lookup.h>

#define orc_warn(fmt, ...) \
	printk_deferred_once(KERN_WARNING pr_fmt("WARNING: " fmt), ##__VA_ARGS__)

extern int __start_orc_unwind_ip[];
extern int __stop_orc_unwind_ip[];
extern struct orc_entry __start_orc_unwind[];
extern struct orc_entry __stop_orc_unwind[];

static DEFINE_MUTEX(sort_mutex);
int *cur_orc_ip_table = __start_orc_unwind_ip;
struct orc_entry *cur_orc_table = __start_orc_unwind;

unsigned int lookup_num_blocks;
bool orc_init;

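/*
 * Each .orc_unwind_ip entry is a 32-bit offset relative to its own position
 * in the table.  Convert an entry back to the absolute instruction address
 * it describes:
 */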
static inline unsigned long orc_ip(const int *ip)
{
	return (unsigned long)ip + *ip;
}

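/*
 * Binary-search the sorted .orc_unwind_ip table for the entry covering 'ip'
 * and return the corresponding .orc_unwind entry.  The two tables are
 * parallel arrays: index N in ip_table describes index N in u_table.
 */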
static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
				    unsigned int num_entries, unsigned long ip)
{
	int *first = ip_table;
	int *last = ip_table + num_entries - 1;
	int *mid = first, *found = first;

	if (!num_entries)
		return NULL;

	/*
	 * Do a binary range search to find the rightmost duplicate of a given
	 * starting address. Some entries are section terminators which are
	 * "weak" entries for ensuring there are no gaps. They should be
	 * ignored when they conflict with a real entry.
	 */
	while (first <= last) {
		mid = first + ((last - first) / 2);

		if (orc_ip(mid) <= ip) {
			found = mid;
			first = mid + 1;
		} else
			last = mid - 1;
	}

	return u_table + (found - ip_table);
}

#ifdef CONFIG_MODULES
static struct orc_entry *orc_module_find(unsigned long ip)
{
	struct module *mod;

	mod = __module_address(ip);
	if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
		return NULL;
	return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind,
			  mod->arch.num_orcs, ip);
}
#else
static struct orc_entry *orc_module_find(unsigned long ip)
{
	return NULL;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
static struct orc_entry *orc_find(unsigned long ip);

/*
 * Ftrace dynamic trampolines do not have ORC entries of their own.
 * However, they are copies of the static ftrace entries defined in
 * ftrace_*.S, which do have ORC entries.
 *
 * If the unwinder comes across an ftrace trampoline, find the static
 * ftrace function it was copied from and use that function's ORC
 * entry, as the placement of the return address on the stack will be
 * identical.
 */
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	struct ftrace_ops *ops;
	unsigned long caller;

	ops = ftrace_ops_trampoline(ip);
	if (!ops)
		return NULL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
		caller = (unsigned long)ftrace_regs_call;
	else
		caller = (unsigned long)ftrace_call;

	/* Prevent unlikely recursion */
	if (ip == caller)
		return NULL;

	return orc_find(caller);
}
#else
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	return NULL;
}
#endif

/*
 * If we crash with IP==0, the last successfully executed instruction
 * was probably an indirect function call with a NULL function pointer,
 * and we don't have unwind information for NULL.
 * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
 * pointer into its parent and then continue normally from there.
 */
static struct orc_entry null_orc_entry = {
	.sp_offset = sizeof(long),
	.sp_reg = ORC_REG_SP,
	.bp_reg = ORC_REG_UNDEFINED,
	.type = ORC_TYPE_CALL
};

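/*
 * Look up the ORC entry for a text address, trying each possible source in
 * turn: the vmlinux fast lookup table, the vmlinux .init slow path, module
 * ORC tables, and finally ftrace trampolines.
 */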
static struct orc_entry *orc_find(unsigned long ip)
{
	static struct orc_entry *orc;

	if (!orc_init)
		return NULL;

	if (ip == 0)
		return &null_orc_entry;

	/* For non-init vmlinux addresses, use the fast lookup table: */
	if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
		unsigned int idx, start, stop;

		idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;

		if (unlikely((idx >= lookup_num_blocks-1))) {
			orc_warn("bad lookup idx: idx=%u num=%u ip=%pB\n",
				 idx, lookup_num_blocks, (void *)ip);
			return NULL;
		}

		start = orc_lookup[idx];
		stop = orc_lookup[idx + 1] + 1;

		if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
			     (__start_orc_unwind + stop > __stop_orc_unwind))) {
			orc_warn("bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
				 idx, lookup_num_blocks, start, stop, (void *)ip);
			return NULL;
		}

		return __orc_find(__start_orc_unwind_ip + start,
				  __start_orc_unwind + start, stop - start, ip);
	}

	/* vmlinux .init slow lookup: */
	if (init_kernel_text(ip))
		return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				  __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);

	/* Module lookup: */
	orc = orc_module_find(ip);
	if (orc)
		return orc;

	return orc_ftrace_find(ip);
}

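/*
 * sort() swap callback.  Because .orc_unwind_ip entries are relative to
 * their own table slots, swapping two entries also requires adjusting each
 * value by the distance ('delta') between the slots.  The corresponding
 * .orc_unwind entries are swapped in lockstep so the tables stay parallel.
 */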
static void orc_sort_swap(void *_a, void *_b, int size)
{
	struct orc_entry *orc_a, *orc_b;
	struct orc_entry orc_tmp;
	int *a = _a, *b = _b, tmp;
	int delta = _b - _a;

	/* Swap the .orc_unwind_ip entries: */
	tmp = *a;
	*a = *b + delta;
	*b = tmp - delta;

	/* Swap the corresponding .orc_unwind entries: */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	orc_b = cur_orc_table + (b - cur_orc_ip_table);
	orc_tmp = *orc_a;
	*orc_a = *orc_b;
	*orc_b = orc_tmp;
}

static int orc_sort_cmp(const void *_a, const void *_b)
{
	struct orc_entry *orc_a;
	const int *a = _a, *b = _b;
	unsigned long a_val = orc_ip(a);
	unsigned long b_val = orc_ip(b);

	if (a_val > b_val)
		return 1;
	if (a_val < b_val)
		return -1;

	/*
	 * The "weak" section terminator entries need to always be on the left
	 * to ensure the lookup code skips them in favor of real entries.
	 * These terminator entries exist to handle any gaps created by
	 * whitelisted .o files which didn't get objtool generation.
	 */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1;
}

#ifdef CONFIG_MODULES
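/*
 * Sort a module's ORC tables at load time so __orc_find() can binary-search
 * them, just like the vmlinux tables sorted in unwind_init().
 */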
void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
			void *_orc, size_t orc_size)
{
	int *orc_ip = _orc_ip;
	struct orc_entry *orc = _orc;
	unsigned int num_entries = orc_ip_size / sizeof(int);

	WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
		     orc_size % sizeof(*orc) != 0 ||
		     num_entries != orc_size / sizeof(*orc));

	/*
	 * The 'cur_orc_*' globals allow the orc_sort_swap() callback to
	 * associate an .orc_unwind_ip table entry with its corresponding
	 * .orc_unwind entry so they can both be swapped.
	 */
	mutex_lock(&sort_mutex);
	cur_orc_ip_table = orc_ip;
	cur_orc_table = orc;
	sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
	mutex_unlock(&sort_mutex);

	mod->arch.orc_unwind_ip = orc_ip;
	mod->arch.orc_unwind = orc;
	mod->arch.num_orcs = num_entries;
}
#endif

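/*
 * Boot-time setup: sanity-check the table sizes, sort both ORC tables, and
 * build the fast lookup table, which maps each LOOKUP_BLOCK_SIZE-sized block
 * of kernel text to the range of .orc_unwind indices that can cover it.
 */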
void __init unwind_init(void)
{
	size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
	size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
	size_t num_entries = orc_ip_size / sizeof(int);
	struct orc_entry *orc;
	int i;

	if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
	    orc_size % sizeof(struct orc_entry) != 0 ||
	    num_entries != orc_size / sizeof(struct orc_entry)) {
		orc_warn("Bad or missing .orc_unwind table. Disabling unwinder.\n");
		return;
	}

	/* Sort the .orc_unwind and .orc_unwind_ip tables: */
	sort(__start_orc_unwind_ip, num_entries, sizeof(int), orc_sort_cmp,
	     orc_sort_swap);

	/* Initialize the fast lookup table: */
	lookup_num_blocks = orc_lookup_end - orc_lookup;
	for (i = 0; i < lookup_num_blocks-1; i++) {
		orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				 num_entries,
				 LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
		if (!orc) {
			orc_warn("Corrupt .orc_unwind table. Disabling unwinder.\n");
			return;
		}

		orc_lookup[i] = orc - __start_orc_unwind;
	}

	/* Initialize the ending block: */
	orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries,
			 LOOKUP_STOP_IP);
	if (!orc) {
		orc_warn("Corrupt .orc_unwind table. Disabling unwinder.\n");
		return;
	}
	orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind;

	orc_init = true;
}

unsigned long unwind_get_return_address(struct unwind_state *state)
{
	if (unwind_done(state))
		return 0;

	return __kernel_text_address(state->ip) ? state->ip : 0;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);

unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
{
	if (unwind_done(state))
		return NULL;

	if (state->regs)
		return &state->regs->ip;

	if (state->sp)
		return (unsigned long *)state->sp - 1;

	return NULL;
}

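/*
 * Check that the 'len' bytes at '_addr' lie on a stack the unwinder is
 * allowed to read, updating state->stack_info if the access moves to
 * another valid stack.
 */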
static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
			    size_t len)
{
	struct stack_info *info = &state->stack_info;
	void *addr = (void *)_addr;

	if (!on_stack(info, addr, len) &&
	    (get_stack_info(addr, state->task, info, &state->stack_mask)))
		return false;

	return true;
}

static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
			    unsigned long *val)
{
	if (!stack_access_ok(state, addr, sizeof(long)))
		return false;

	*val = READ_ONCE_NOCHECK(*(unsigned long *)addr);
	return true;
}

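/*
 * Read the saved IP and SP out of a full pt_regs frame on the stack.
 */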
static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
			     unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (struct pt_regs *)addr;

	/* x86-32 support will be more complicated due to the &regs->sp hack */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));

	if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
		return false;

	*ip = regs->ip;
	*sp = regs->sp;
	return true;
}

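/*
 * Same as deref_stack_regs(), but for a partial "iret" frame: only the last
 * IRET_FRAME_SIZE bytes of a pt_regs (starting at IRET_FRAME_OFFSET) are
 * present on the stack.
 */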
static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
				  unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;

	if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
		return false;

	*ip = regs->ip;
	*sp = regs->sp;
	return true;
}

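/*
 * Advance the unwind state by one frame:
 *
 *   1. Find the ORC entry for the current IP.
 *   2. Use the entry's sp_reg/sp_offset to locate the previous frame's
 *      stack pointer.
 *   3. Depending on the entry type, recover the previous IP and SP from a
 *      plain return address, a full pt_regs frame, or an iret frame.
 *   4. Recover the previous BP from bp_reg/bp_offset.
 */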
bool unwind_next_frame(struct unwind_state *state)
{
	unsigned long ip_p, sp, orig_ip = state->ip, prev_sp = state->sp;
	enum stack_type prev_type = state->stack_info.type;
	struct orc_entry *orc;
	bool indirect = false;

	if (unwind_done(state))
		return false;

	/* Don't let modules unload while we're reading their ORC data. */
	preempt_disable();

	/* End-of-stack check for user tasks: */
	if (state->regs && user_mode(state->regs))
		goto the_end;

	/*
	 * Find the orc_entry associated with the text address.
	 *
	 * Decrement call return addresses by one so they work for sibling
	 * calls and calls to noreturn functions.
	 */
	orc = orc_find(state->signal ? state->ip : state->ip - 1);
	if (!orc)
		goto err;

	/* End-of-stack check for kernel threads: */
	if (orc->sp_reg == ORC_REG_UNDEFINED) {
		if (!orc->end)
			goto err;

		goto the_end;
	}

	/* Find the previous frame's stack: */
	switch (orc->sp_reg) {
	case ORC_REG_SP:
		sp = state->sp + orc->sp_offset;
		break;

	case ORC_REG_BP:
		sp = state->bp + orc->sp_offset;
		break;

	case ORC_REG_SP_INDIRECT:
		sp = state->sp + orc->sp_offset;
		indirect = true;
		break;

	case ORC_REG_BP_INDIRECT:
		sp = state->bp + orc->sp_offset;
		indirect = true;
		break;

	case ORC_REG_R10:
		if (!state->regs || !state->full_regs) {
			orc_warn("missing regs for base reg R10 at ip %pB\n",
				 (void *)state->ip);
			goto err;
		}
		sp = state->regs->r10;
		break;

	case ORC_REG_R13:
		if (!state->regs || !state->full_regs) {
			orc_warn("missing regs for base reg R13 at ip %pB\n",
				 (void *)state->ip);
			goto err;
		}
		sp = state->regs->r13;
		break;

	case ORC_REG_DI:
		if (!state->regs || !state->full_regs) {
			orc_warn("missing regs for base reg DI at ip %pB\n",
				 (void *)state->ip);
			goto err;
		}
		sp = state->regs->di;
		break;

	case ORC_REG_DX:
		if (!state->regs || !state->full_regs) {
			orc_warn("missing regs for base reg DX at ip %pB\n",
				 (void *)state->ip);
			goto err;
		}
		sp = state->regs->dx;
		break;

	default:
		orc_warn("unknown SP base reg %d for ip %pB\n",
			 orc->sp_reg, (void *)state->ip);
		goto err;
	}

	if (indirect) {
		if (!deref_stack_reg(state, sp, &sp))
			goto err;
	}

	/* Find IP, SP and possibly regs: */
	switch (orc->type) {
	case ORC_TYPE_CALL:
		ip_p = sp - sizeof(long);

		if (!deref_stack_reg(state, ip_p, &state->ip))
			goto err;

		state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
						  state->ip, (void *)ip_p);

		state->sp = sp;
		state->regs = NULL;
		state->signal = false;
		break;

	case ORC_TYPE_REGS:
		if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn("can't dereference registers at %p for ip %pB\n",
				 (void *)sp, (void *)orig_ip);
			goto err;
		}

		state->regs = (struct pt_regs *)sp;
		state->full_regs = true;
		state->signal = true;
		break;

	case ORC_TYPE_REGS_IRET:
		if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn("can't dereference iret registers at %p for ip %pB\n",
				 (void *)sp, (void *)orig_ip);
			goto err;
		}

		state->regs = (void *)sp - IRET_FRAME_OFFSET;
		state->full_regs = false;
		state->signal = true;
		break;

	default:
		orc_warn("unknown .orc_unwind entry type %d for ip %pB\n",
			 orc->type, (void *)orig_ip);
		break;
	}

	/* Find BP: */
	switch (orc->bp_reg) {
	case ORC_REG_UNDEFINED:
		if (state->regs && state->full_regs)
			state->bp = state->regs->bp;
		break;

	case ORC_REG_PREV_SP:
		if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
			goto err;
		break;

	case ORC_REG_BP:
		if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp))
			goto err;
		break;

	default:
		orc_warn("unknown BP base reg %d for ip %pB\n",
			 orc->bp_reg, (void *)orig_ip);
		goto err;
	}

	/* Prevent a recursive loop due to bad ORC data: */
	if (state->stack_info.type == prev_type &&
	    on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
	    state->sp <= prev_sp) {
		orc_warn("stack going in the wrong direction? ip=%pB\n",
			 (void *)orig_ip);
		goto err;
	}

	preempt_enable();
	return true;

err:
	state->error = true;

the_end:
	preempt_enable();
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);

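/*
 * Initialize the unwind state for a task.  The starting registers come from
 * 'regs' if provided, from the current CPU registers if unwinding the
 * current task, or from the inactive_task_frame saved at task->thread.sp for
 * a sleeping task.  Then skip ahead to 'first_frame' (or past the regs
 * frame) so the caller starts at the frame it asked for.
 */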
void __unwind_start(struct unwind_state *state, struct task_struct *task,
		    struct pt_regs *regs, unsigned long *first_frame)
{
	memset(state, 0, sizeof(*state));
	state->task = task;

	/*
	 * Refuse to unwind the stack of a task while it's executing on another
	 * CPU. This check is racy, but that's ok: the unwinder has other
	 * checks to prevent it from going off the rails.
	 */
	if (task_on_another_cpu(task))
		goto done;

	if (regs) {
		if (user_mode(regs))
			goto done;

		state->ip = regs->ip;
		state->sp = kernel_stack_pointer(regs);
		state->bp = regs->bp;
		state->regs = regs;
		state->full_regs = true;
		state->signal = true;

	} else if (task == current) {
		asm volatile("lea (%%rip), %0\n\t"
			     "mov %%rsp, %1\n\t"
			     "mov %%rbp, %2\n\t"
			     : "=r" (state->ip), "=r" (state->sp),
			       "=r" (state->bp));

	} else {
		struct inactive_task_frame *frame = (void *)task->thread.sp;

		state->sp = task->thread.sp;
		state->bp = READ_ONCE_NOCHECK(frame->bp);
		state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
	}

	if (get_stack_info((unsigned long *)state->sp, state->task,
			   &state->stack_info, &state->stack_mask)) {
		/*
		 * We weren't on a valid stack. It's possible that
		 * we overflowed a valid stack into a guard page.
		 * See if the next page up is valid so that we can
		 * generate some kind of backtrace if this happens.
		 */
		void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
		if (get_stack_info(next_page, state->task, &state->stack_info,
				   &state->stack_mask))
			return;
	}

	/*
	 * The caller can provide the address of the first frame directly
	 * (first_frame) or indirectly (regs->sp) to indicate which stack frame
	 * to start unwinding at. Skip ahead until we reach it.
	 */

	/* When starting from regs, skip the regs frame: */
	if (regs) {
		unwind_next_frame(state);
		return;
	}

	/* Otherwise, skip ahead to the user-specified starting frame: */
	while (!unwind_done(state) &&
	       (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
		state->sp <= (unsigned long)first_frame))
		unwind_next_frame(state);

	return;

done:
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return;
}
EXPORT_SYMBOL_GPL(__unwind_start);