// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * KVM paravirt_ops implementation
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/tlb.h>

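/*
 * Command-line overrides: "no-kvmapf" disables async page fault handling,
 * "no-steal-acc" disables steal-time accounting.
 */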
static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

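/*
 * These per-CPU areas are shared with the hypervisor, so under SEV they must
 * be mapped decrypted; see sev_map_percpu_data() below.
 */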
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

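/*
 * A task stopped by an async page fault is parked on one of these nodes,
 * keyed by the token the host associated with the fault. The nodes hang off
 * a small hash table of lock-protected buckets.
 */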
struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

/*
 * kvm_async_pf_task_wait - sleep until the async page fault for @token is
 * resolved by the host.
 *
 * @interrupt_kernel: Is this called from a routine which interrupts the kernel
 *		      (other than user space)?
 */
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	rcu_irq_enter();

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) ||
		   (IS_ENABLED(CONFIG_PREEMPT_COUNT)
		    ? preempt_count() > 1 || rcu_preempt_depth()
		    : interrupt_kernel);
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		rcu_irq_exit();

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}

		rcu_irq_enter();
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

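/*
 * Wake one waiter: a halted vCPU context gets a reschedule IPI, a sleeping
 * task is woken from its swait queue.
 */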
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swq_has_sleeper(&n->wq))
		swake_up_one(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

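/*
 * Handle a "page ready" notification: wake the task waiting on @token, or
 * every parked task when @token is ~0. If the wake up beats the fault
 * delivery, leave a dummy node behind for kvm_async_pf_task_wait() to find.
 */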
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * Async PF was not yet handled.
		 * Add a dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while another CPU
			 * handles the async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

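/*
 * #PF entry point while async PF is enabled: an ordinary fault falls through
 * to do_page_fault(), "page not present" parks the task until the host has
 * paged the memory back in, and "page ready" wakes it up again.
 */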
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs));
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_ops.cpu.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU, so
	 * there's no need for locks or memory barriers.
	 * An optimization barrier is implied by the apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

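/*
 * Enable the per-CPU paravirt features: hand the async PF and PV EOI shared
 * areas to the hypervisor through their MSRs and register steal time.
 */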
static void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		pa |= KVM_ASYNC_PF_ENABLED;

		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		pr_info("KVM setup async PF for cpu %d\n",
			smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	pr_info("Unregister pv shared memory for cpu %d\n",
		smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's
	 * memory. The new kernel can re-enable it when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

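/*
 * Lockless read of a vCPU's steal-time record: the host increments ->version
 * before and after every update, so an odd or changed version means the read
 * raced with an update and must be retried.
 */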
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory regions pointed
 * to by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that hotplugged
 * CPUs will have their per-cpu variables already mapped as decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (!sev_active())
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}

#ifdef CONFIG_SMP
#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG)

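/*
 * Send an IPI to all CPUs in @mask using as few KVM_HC_SEND_IPI hypercalls
 * as possible: destinations are collected in a 128-bit bitmap based at a
 * minimum APIC ID, which is flushed whenever the next APIC ID no longer fits
 * in the current KVM_IPI_CLUSTER_SIZE window.
 */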
static void __send_ipi_mask(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	int cpu, apic_id, icr;
	int min = 0, max = 0;
#ifdef CONFIG_X86_64
	__uint128_t ipi_bitmap = 0;
#else
	u64 ipi_bitmap = 0;
#endif
	long ret;

	if (cpumask_empty(mask))
		return;

	local_irq_save(flags);

	switch (vector) {
	default:
		icr = APIC_DM_FIXED | vector;
		break;
	case NMI_VECTOR:
		icr = APIC_DM_NMI;
		break;
	}

	for_each_cpu(cpu, mask) {
		apic_id = per_cpu(x86_cpu_to_apicid, cpu);
		if (!ipi_bitmap) {
			min = max = apic_id;
		} else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
			ipi_bitmap <<= min - apic_id;
			min = apic_id;
		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
			max = apic_id < max ? max : apic_id;
		} else {
			ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
			WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
			min = max = apic_id;
			ipi_bitmap = 0;
		}
		__set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
	}

	if (ipi_bitmap) {
		ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
		WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
	}

	local_irq_restore(flags);
}

static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
{
	__send_ipi_mask(mask, vector);
}

static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	struct cpumask new_mask;
	const struct cpumask *local_mask;

	cpumask_copy(&new_mask, mask);
	cpumask_clear_cpu(this_cpu, &new_mask);
	local_mask = &new_mask;
	__send_ipi_mask(local_mask, vector);
}

static void kvm_send_ipi_allbutself(int vector)
{
	kvm_send_ipi_mask_allbutself(cpu_online_mask, vector);
}

static void kvm_send_ipi_all(int vector)
{
	__send_ipi_mask(cpu_online_mask, vector);
}

/*
 * Set the IPI entry points
 */
static void kvm_setup_pv_ipi(void)
{
	apic->send_IPI_mask = kvm_send_ipi_mask;
	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
	apic->send_IPI_allbutself = kvm_send_ipi_allbutself;
	apic->send_IPI_all = kvm_send_ipi_all;
	pr_info("KVM setup pv IPIs\n");
}

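/*
 * The KVM_HINTS_REALTIME hint promises that vCPUs are never preempted for an
 * unbounded time, so plain spinning beats the paravirt slowpath here.
 */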
static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
		static_branch_disable(&virt_spin_lock_key);
}

static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_offline();
	local_irq_enable();
	return 0;
}
#endif

static void __init kvm_apf_trap_init(void)
{
	update_intr_gate(X86_TRAP_PF, async_page_fault);
}

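/*
 * Paravirt remote TLB flush: there is no point in sending a flush IPI to a
 * preempted vCPU, so mark it with KVM_VCPU_FLUSH_TLB instead and let the
 * host perform the flush before the vCPU runs again; only vCPUs that are
 * actually running receive the native flush.
 */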
static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);

static void kvm_flush_tlb_others(const struct cpumask *cpumask,
			const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * We have to flush only the online vCPUs, and
	 * queue flush_on_enter for the preempted vCPUs.
	 */
	for_each_cpu(cpu, flushmask) {
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_others(flushmask, info);
}

static void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_ops.time.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

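/*
 * KVM identifies itself through the hypervisor CPUID leaves: the base leaf
 * carries the "KVMKVMKVM" signature, and a return of zero means we are not
 * running on KVM.
 */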
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

static void __init kvm_apic_init(void)
{
#if defined(CONFIG_SMP)
	if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
		kvm_setup_pv_ipi();
#endif
}

static void __init kvm_init_platform(void)
{
	kvmclock_init();
	x86_platform.apic_post_init = kvm_apic_init;
}

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.type			= X86_HYPER_KVM,
	.init.guest_late_init	= kvm_guest_init,
	.init.x2apic_available	= kvm_para_available,
	.init.init_platform	= kvm_init_platform,
};

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

static __init int kvm_setup_pv_tlb_flush(void)
{
	int cpu;

	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}
		pr_info("KVM setup pv remote TLB flush\n");
	}

	return 0;
}
arch_initcall(kvm_setup_pv_tlb_flush);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

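/*
 * Paravirt qspinlock wait primitive: instead of spinning, halt the vCPU
 * until the lock byte changes or a kick from kvm_kick_cpu() arrives.
 */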
static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * Halt until it's our turn and we get kicked. Note that we do a safe
	 * halt for the irq-enabled case, to avoid a hang when the lock info
	 * is overwritten in the irq spinlock slowpath and no spurious
	 * interrupt occurs to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}

#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimized version for x86-64, to avoid saving and restoring 8
 * 64-bit registers to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"
"ret;"
".popsection");

#endif

/*
 * Setup pv_ops.lock to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
		return;

	/* Don't use the pvqspinlock code if there is only 1 vCPU. */
	if (num_possible_cpus() == 1)
		return;

	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock =
		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = kvm_wait;
	pv_ops.lock.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_ops.lock.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */