// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 */

#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pagemap.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>

#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff
#define KVM_INST_LI		0x38000000

#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6

#define SPR_FROM		0
#define SPR_TO			0x100

#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)

#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124

#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146

#define KVM_INST_MTSRIN		0x7c0001e4

static bool kvm_patching_worked = true;
char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;

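/*
 * Replace one kernel instruction in place and flush the icache for that
 * word so the patched instruction is what ends up being executed.
 */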
static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}

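/*
 * Patch in a native-sized load ("load long"): ld on 64-bit, lwz on 32-bit.
 * The low bits of addr become a displacement off r0; since the magic page
 * sits at the top of the address space (-4096), the sign-extended
 * displacement reaches it without needing a base register.
 */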
static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}

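/*
 * Load a 64-bit shared-page field into rt. 64-bit kernels use ld; 32-bit
 * kernels only want the low word, so they lwz from addr + 4 (the field is
 * a big-endian 64-bit value there).
 */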
static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}

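/*
 * Patch in an unconditional relative branch to addr. Callers check the
 * forward distance against KVM_INST_B_MAX first, so the offset fits the
 * branch offset field (KVM_INST_B_MASK).
 */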
static void kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/* On relocatable kernels interrupt handlers and our code
	   can be in different regions, so we don't patch them */

	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}

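/*
 * Carve len bytes out of the kvm_tmp area for an emulation chunk.
 * Returns NULL and marks patching as failed once the buffer is full;
 * whatever stays unused is handed back by kvm_free_tmp() at init time.
 */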
static u32 *kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
		       kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void*)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}

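/*
 * The kvm_emulate_* arrays below are assembly templates; the *_offs
 * values are word offsets into a template that get fixed up per call
 * site (the branch back to the next guest instruction, the register
 * operand, the original instruction). The templates use r30/r31
 * internally and stash the guest values in the magic page scratch
 * slots, which is why those two registers get special treatment here.
 */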
extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

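/*
 * mtmsr and mtmsrd with L=0 update the whole MSR, so this template reads
 * the source register in two spots (reg1/reg2); both slots are fixed up
 * the same way.
 */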
extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#ifdef CONFIG_BOOKE

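/*
 * BookE: wrtee/wrteei only change MSR[EE]. For "wrteei 1" the register
 * slot in the template is replaced by "li r30, MSR_EE" (imm_one below);
 * otherwise the named register (or a scratch reload for r30/r31) is
 * patched in, just like for mtmsr.
 */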
extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];

static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

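/*
 * "wrteei 0" needs no register fixup at all: the template just clears
 * EE, so only the branch back to the guest code is patched.
 */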
extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];

static void kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

#ifdef CONFIG_PPC_BOOK3S_32

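/*
 * 32-bit Book3S: mtsrin writes a segment register. The template needs
 * both the source register (rt) and the SR index register (rb), hence
 * the two fixup slots.
 */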
extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

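/*
 * Ask the host, via an ePAPR hypercall, to map the magic page at the top
 * of the effective address space on this CPU and report back the feature
 * bits the host supports for it.
 */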
static void kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8] = {0};
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;

	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

	*features = out[0];
}

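/*
 * Look at a single kernel instruction and, if it is one of the privileged
 * instructions we know how to handle, patch it: simple SPR accesses become
 * loads/stores on the magic page, tlbsync becomes a nop, and the trickier
 * cases branch out to an emulation chunk in kvm_tmp.
 */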
static void kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;


	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}

	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}

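/* Start/end markers of the emulation templates, which must not be patched. */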
extern u32 kvm_template_start[];
extern u32 kvm_template_end[];

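/*
 * Map the magic page, then walk the kernel text and patch every
 * paravirtualizable instruction we find (skipping the template code
 * itself).
 */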
static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/* Quick self-test to see if the mapping works */
	if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
	       kvm_patching_worked ? "worked" : "failed");
}

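/*
 * Hand the unused tail of kvm_tmp back to the page allocator once
 * patching is done.
 */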
static __init void kvm_free_tmp(void)
{
	/*
	 * Inform kmemleak about the hole in the .bss section since the
	 * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
	 */
	kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
			   ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
	free_reserved_area(&kvm_tmp[kvm_tmp_index],
			   &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
}

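/*
 * Guest-side setup: if we are running under KVM with ePAPR paravirt
 * support and the host offers the magic page, patch ourselves. The
 * unused patch buffer is freed in every case.
 */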
static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		goto free_tmp;

	if (!epapr_paravirt_enabled)
		goto free_tmp;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

free_tmp:
	kvm_free_tmp();

	return 0;
}

postcore_initcall(kvm_guest_init);