/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

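/*
 * Tasks blocked on an async "page not present" fault are parked in a small
 * hash table, keyed by the token the host passes with the notification; the
 * later "page ready" notification carries the same token and is used to look
 * the sleeper up again.
 */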
struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

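/* Look up the sleeper node for @token in bucket @b; caller holds b->lock. */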
static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

/*
 * Sleep, or halt if scheduling is not possible, until the host reports that
 * the page for @token is present again.
 *
 * @interrupt_kernel: Is this called from a routine which interrupts the kernel
 * 		      (other than user space)?
 */
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	rcu_irq_enter();

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> the wakeup was delivered ahead of the PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) ||
		   (IS_ENABLED(CONFIG_PREEMPT_COUNT)
		    ? preempt_count() > 1 || rcu_preempt_depth()
		    : interrupt_kernel);
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		rcu_irq_exit();

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}

		rcu_irq_enter();
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

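/*
 * Wake one sleeper: halted vCPUs get a reschedule IPI, sleeping tasks are
 * woken through their swait queue.  Caller holds the bucket lock.
 */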
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swq_has_sleeper(&n->wq))
		swake_up(&n->wq);
}

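/* Wake every sleeper parked on the current CPU, across all hash buckets. */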
static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

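/*
 * Handle a "page ready" notification: wake the task sleeping on @token, or,
 * if it has not gone to sleep yet, leave a dummy node behind so that the
 * wait path can detect the early wakeup.  Token ~0 means "wake everyone".
 */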
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

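/* Read and clear the async PF reason word the host writes into apf_reason. */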
u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

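/*
 * Page-fault entry point when async PF is enabled: a zero reason means a
 * regular fault, otherwise the host is telling us to park or wake a task.
 */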
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs));
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);

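/*
 * Set up the bits of paravirt_ops that need no per-CPU state: the "KVM"
 * name, the NOP IO delay, and skipping the IO-APIC timer check.
 */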
static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

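/* Tell the host where this CPU's steal-time record lives. */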
static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/**
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

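/*
 * Enable the per-CPU paravirt features this guest negotiated: async page
 * faults, paravirt EOI, and steal-time accounting.
 */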
static void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		pa |= KVM_ASYNC_PF_ENABLED;

		/* Async page fault support for L1 hypervisor is optional */
		if (wrmsr_safe(MSR_KVM_ASYNC_PF_EN,
			(pa | KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) & 0xffffffff, pa >> 32) < 0)
			wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

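/*
 * Read the steal time the host accumulated for @cpu.  The version field is
 * a seqcount: an odd value, or a change across the read, means the host was
 * updating the record and the read must be retried.
 */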
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory regions pointed
 * to by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged will have their per-cpu variables already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (!sev_active())
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_offline();
	local_irq_enable();
	return 0;
}
#endif

static void __init kvm_apf_trap_init(void)
{
	update_intr_gate(X86_TRAP_PF, async_page_fault);
}

static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);

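/*
 * Paravirt remote TLB flush: instead of sending an IPI to a preempted vCPU,
 * set KVM_VCPU_FLUSH_TLB in its steal-time record so the host flushes its
 * TLB the next time it runs, and IPI only the vCPUs that are running now.
 */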
static void kvm_flush_tlb_others(const struct cpumask *cpumask,
			const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * We have to call flush only on online vCPUs, and
	 * queue flush_on_enter for pre-empted vCPUs.
	 */
	for_each_cpu(cpu, flushmask) {
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_others(flushmask, info);
}

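/* Guest-wide setup, run once at boot via the hypervisor guest_late_init hook. */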
static void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH))
		pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

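/*
 * Probe the hypervisor CPUID range for the "KVMKVMKVM" signature; the result
 * is cached by kvm_cpuid_base() so the probe runs only once.
 */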
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.type			= X86_HYPER_KVM,
	.init.guest_late_init	= kvm_guest_init,
	.init.x2apic_available	= kvm_para_available,
};

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

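/* Allocate the per-CPU scratch cpumasks used by kvm_flush_tlb_others(). */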
static __init int kvm_setup_pv_tlb_flush(void)
{
	int cpu;

	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH)) {
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}
		pr_info("KVM setup pv remote TLB flush\n");
	}

	return 0;
}
arch_initcall(kvm_setup_pv_tlb_flush);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * Halt until it's our turn and we are kicked.  Note that we do a safe
	 * halt in the irqs-enabled case, to avoid hanging if the lock word is
	 * overwritten in the irq spinlock slowpath and no spurious interrupt
	 * arrives to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}

#ifdef CONFIG_X86_32
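/* Report whether vCPU @cpu was preempted, read from its steal-time record. */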
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimized version for x86-64, to avoid saving and restoring eight
 * 64-bit registers to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"
"ret;"
".popsection");

#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;
	pv_lock_ops.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_lock_ops.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */