// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>

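/*
 * Offsets (in units of kprobe_opcode_t) of the fixup points within the
 * optprobe detour buffer template. The optprobe_template_* symbols are
 * assumed to be defined in the accompanying assembly (optprobes_head.S
 * in this port).
 */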
#define TMPL_CALL_HDLR_IDX	\
	(optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX	\
	(optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX	\
	(optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX	\
	(optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX	\
	(optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX	\
	(optprobe_template_end - optprobe_template_entry)

DEFINE_INSN_CACHE_OPS(ppc_optinsn);

static bool insn_page_in_use;

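/*
 * A single page of detour buffer memory (optinsn_slot, assumed to be
 * reserved alongside the template in optprobes_head.S) backs all
 * optprobes; insn_page_in_use tracks whether it has been handed out.
 */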
static void *__ppc_alloc_insn_page(void)
{
	if (insn_page_in_use)
		return NULL;
	insn_page_in_use = true;
	return &optinsn_slot;
}

static void __ppc_free_insn_page(void *page __maybe_unused)
{
	insn_page_in_use = false;
}

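/* Instruction slot cache for detour buffers, backed by the page above. */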
struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
	.pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
	/* insn_size initialized later */
	.alloc = __ppc_alloc_insn_page,
	.free = __ppc_free_insn_page,
	.nr_garbage = 0,
};

/*
 * Check if we can optimize this probe. Returns NIP post-emulation if
 * this can be optimized and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
	struct pt_regs regs;
	struct instruction_op op;
	unsigned long nip = 0;

	/*
	 * A kprobe placed on the kretprobe trampoline during boot is on
	 * a 'nop' instruction, which can always be emulated, so further
	 * checks can be skipped.
	 */
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return (unsigned long)p->addr + sizeof(kprobe_opcode_t);

	/*
	 * We only support optimizing kernel addresses, not module
	 * addresses.
	 *
	 * FIXME: Optimize kprobes placed in module addresses.
	 */
	if (!is_kernel_addr((unsigned long)p->addr))
		return 0;

	memset(&regs, 0, sizeof(struct pt_regs));
	regs.nip = (unsigned long)p->addr;
	regs.trap = 0x0;
	regs.msr = MSR_KERNEL;

	/*
	 * Kprobes placed on conditional branch instructions are not
	 * optimized, as we can't predict the nip ahead of time with a
	 * dummy pt_regs, and so can't ensure that the return branch
	 * from the detour buffer falls within branch range (i.e. 32MB).
	 * A branch back from the trampoline to the nip returned by
	 * analyse_instr() here is set up in the detour buffer.
	 *
	 * Ensure that the instruction is not a conditional branch and
	 * that it can be emulated.
	 */
	if (!is_conditional_branch(*p->ainsn.insn) &&
	    analyse_instr(&op, &regs, *p->ainsn.insn) == 1) {
		emulate_update_regs(&regs, &op);
		nip = regs.nip;
	}

	return nip;
}

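/*
 * Called from the detour buffer in place of the trap-based probe path;
 * runs the pre-handler much like the regular kprobe break handler, but
 * without taking an exception.
 */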
static void optimized_callback(struct optimized_kprobe *op,
			       struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		regs->nip = (unsigned long)op->kp.addr;
		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(optimized_callback);

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		free_ppc_optinsn_slot(op->optinsn.insn, 1);
		op->optinsn.insn = NULL;
	}
}

/*
 * emulate_step() takes the instruction to be emulated as its second
 * parameter, so load register 'r4' with the instruction.
 */
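/*
 * For example, a hypothetical probed instruction of 0x7c0802a6
 * ('mflr r0') would be loaded as:
 *	addis	r4,0,0x7c08
 *	ori	r4,r4,0x02a6
 */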
void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
	/* addis r4,0,(insn)@h */
	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
			  ((val >> 16) & 0xffff));
	addr++;

	/* ori r4,r4,(insn)@l */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) |
			  ___PPC_RS(4) | (val & 0xffff));
}

/*
 * Generate instructions to load provided immediate 64-bit value
 * to register 'r3' and patch these instructions at 'addr'.
 */
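/*
 * For example, a hypothetical val of 0xc000000012345678 would be built
 * up 16 bits at a time as:
 *	lis	r3,0xc000	; bits 48-63 (sign-extended for now)
 *	ori	r3,r3,0x0000	; bits 32-47
 *	rldicr	r3,r3,32,31	; move to the upper word, clearing the
 *				; low 32 bits and the sign extension
 *	oris	r3,r3,0x1234	; bits 16-31
 *	ori	r3,r3,0x5678	; bits 0-15
 */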
void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
{
	/* lis r3,(op)@highest */
	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
			  ((val >> 48) & 0xffff));
	addr++;

	/* ori r3,r3,(op)@higher */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
			  ___PPC_RS(3) | ((val >> 32) & 0xffff));
	addr++;

	/* rldicr r3,r3,32,31 */
	patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
			  ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
	addr++;

	/* oris r3,r3,(op)@h */
	patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) |
			  ___PPC_RS(3) | ((val >> 16) & 0xffff));
	addr++;

	/* ori r3,r3,(op)@l */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
			  ___PPC_RS(3) | (val & 0xffff));
}

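/*
 * Prepare the detour buffer for @p: copy in the template, then patch it
 * with the probe-specific address, branches and instruction.
 */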
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
	kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
	long b_offset;
	unsigned long nip, size;
	int rc, i;

	kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;

	nip = can_optimize(p);
	if (!nip)
		return -EILSEQ;

	/* Allocate instruction slot for detour buffer */
	buff = get_ppc_optinsn_slot();
	if (!buff)
		return -ENOMEM;

	/*
	 * OPTPROBE uses the 'b' instruction to branch to optinsn.insn.
	 *
	 * The target address has to be relatively nearby to permit use
	 * of the branch instruction in powerpc, because the address is
	 * specified in an immediate field in the instruction opcode
	 * itself, i.e. 24 bits in the opcode specify the address.
	 * Therefore the address should be within 32MB on either side of
	 * the current instruction.
	 */
	b_offset = (unsigned long)buff - (unsigned long)p->addr;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Check if the return address is also within 32MB range */
	b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
		   (unsigned long)nip;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

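	/*
	 * The fixups below make the detour buffer, roughly: save regs;
	 * load &op into r3 and call optimized_callback(); load the
	 * probed instruction into r4 and call emulate_step(); restore
	 * regs; branch back to the post-emulation nip. (The register
	 * save/restore is assumed to live in the assembly template.)
	 */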
	/* Setup template */
	/* We can optimize this via patch_instruction_window later */
	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
	pr_devel("Copying template to %p, size %lu\n", buff, size);
	for (i = 0; i < size; i++) {
		rc = patch_instruction(buff + i, *(optprobe_template_entry + i));
		if (rc < 0)
			goto error;
	}

	/*
	 * Fixup the template with instructions to:
	 * 1. load the address of the actual probepoint
	 */
	patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX);

	/*
	 * 2. branch to optimized_callback() and emulate_step()
	 */
	op_callback_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("optimized_callback");
	emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
	if (!op_callback_addr || !emulate_step_addr) {
		WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
		goto error;
	}

	branch_op_callback = create_branch((unsigned int *)buff + TMPL_CALL_HDLR_IDX,
					   (unsigned long)op_callback_addr,
					   BRANCH_SET_LINK);

	branch_emulate_step = create_branch((unsigned int *)buff + TMPL_EMULATE_IDX,
					    (unsigned long)emulate_step_addr,
					    BRANCH_SET_LINK);

	if (!branch_op_callback || !branch_emulate_step)
		goto error;

	patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
	patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);

	/*
	 * 3. load instruction to be emulated into relevant register, and
	 */
	patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX);

	/*
	 * 4. branch back from trampoline
	 */
	patch_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0);

	flush_icache_range((unsigned long)buff,
			   (unsigned long)(&buff[TMPL_END_IDX]));

	op->optinsn.insn = buff;

	return 0;

error:
	free_ppc_optinsn_slot(buff, 0);
	return -ERANGE;
}

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}

/*
 * On powerpc, an optprobe always replaces exactly one instruction
 * (4 bytes aligned and 4 bytes long). It is impossible to encounter
 * another kprobe in this address range, so always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

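/*
 * Replace the probed instruction at each probepoint with a branch to
 * the corresponding detour buffer.
 */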
void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/*
		 * Back up the instructions which will be replaced by
		 * the branch to the detour buffer.
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr,
		       RELATIVEJUMP_SIZE);
		patch_instruction(op->kp.addr,
				  create_branch((unsigned int *)op->kp.addr,
						(unsigned long)op->optinsn.insn, 0));
		list_del_init(&op->list);
	}
}

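/*
 * Re-arming the kprobe writes the original breakpoint back over the
 * branch, reverting the probe to the regular trap-based path.
 */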
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

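/*
 * An address is within this optprobe if it falls in the word replaced
 * at kp.addr, i.e. [kp.addr, kp.addr + RELATIVEJUMP_SIZE).
 */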
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
}