blob: a810a95b7bde3ce27683f62b6304f8ef74f339a5 [file] [log] [blame]
Blue Swirl296af7c2010-03-29 19:23:50 +00001/*
2 * QEMU System Emulator
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
Peter Maydell7b31bbc2016-01-26 18:16:56 +000025#include "qemu/osdep.h"
KONRAD Frederic8d4e9142017-02-23 18:29:08 +000026#include "qemu/config-file.h"
Paolo Bonzini33c11872016-03-15 16:58:45 +010027#include "cpu.h"
Paolo Bonzini83c90892012-12-17 18:19:49 +010028#include "monitor/monitor.h"
Markus Armbrustere688df62018-02-01 12:18:31 +010029#include "qapi/error.h"
Markus Armbruster112ed242018-02-26 17:13:27 -060030#include "qapi/qapi-commands-misc.h"
Markus Armbruster9af23982018-02-11 10:36:01 +010031#include "qapi/qapi-events-run-state.h"
Wenchao Xiaa4e15de2014-06-18 08:43:36 +020032#include "qapi/qmp/qerror.h"
Markus Armbrusterd49b6832015-03-17 18:29:20 +010033#include "qemu/error-report.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010034#include "sysemu/sysemu.h"
Max Reitzda31d592016-03-16 19:54:32 +010035#include "sysemu/block-backend.h"
Paolo Bonzini022c62c2012-12-17 18:19:49 +010036#include "exec/gdbstub.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010037#include "sysemu/dma.h"
Vincent Palatinb3946622017-01-10 11:59:55 +010038#include "sysemu/hw_accel.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010039#include "sysemu/kvm.h"
Vincent Palatinb0cb0a62017-01-10 11:59:57 +010040#include "sysemu/hax.h"
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -050041#include "sysemu/hvf.h"
Justin Terry (VM)19306802018-01-22 13:07:49 -080042#include "sysemu/whpx.h"
Paolo Bonzini63c91552016-03-15 13:18:37 +010043#include "exec/exec-all.h"
Blue Swirl296af7c2010-03-29 19:23:50 +000044
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010045#include "qemu/thread.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010046#include "sysemu/cpus.h"
47#include "sysemu/qtest.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010048#include "qemu/main-loop.h"
Markus Armbruster922a01a2018-02-01 12:18:46 +010049#include "qemu/option.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010050#include "qemu/bitmap.h"
Liu Ping Fancb365642013-09-25 14:20:58 +080051#include "qemu/seqlock.h"
KONRAD Frederic8d4e9142017-02-23 18:29:08 +000052#include "tcg.h"
Alexey Kardashevskiy9cb805f2014-08-20 22:16:33 +100053#include "hw/nmi.h"
Pavel Dovgalyuk8b427042015-09-17 19:24:05 +030054#include "sysemu/replay.h"
Igor Mammedovafed5a52017-05-10 13:29:55 +020055#include "hw/boards.h"
Jan Kiszka0ff0fc12011-06-23 10:15:55 +020056
Jan Kiszka6d9cb732011-02-01 22:15:58 +010057#ifdef CONFIG_LINUX
58
59#include <sys/prctl.h>
60
Marcelo Tosattic0532a72010-10-11 15:31:21 -030061#ifndef PR_MCE_KILL
62#define PR_MCE_KILL 33
63#endif
64
Jan Kiszka6d9cb732011-02-01 22:15:58 +010065#ifndef PR_MCE_KILL_SET
66#define PR_MCE_KILL_SET 1
67#endif
68
69#ifndef PR_MCE_KILL_EARLY
70#define PR_MCE_KILL_EARLY 1
71#endif
72
73#endif /* CONFIG_LINUX */
74
Sebastian Tanase27498be2014-07-25 11:56:33 +020075int64_t max_delay;
76int64_t max_advance;
Blue Swirl296af7c2010-03-29 19:23:50 +000077
Jason J. Herne2adcc852015-09-08 13:12:33 -040078/* vcpu throttling controls */
79static QEMUTimer *throttle_timer;
80static unsigned int throttle_percentage;
81
82#define CPU_THROTTLE_PCT_MIN 1
83#define CPU_THROTTLE_PCT_MAX 99
84#define CPU_THROTTLE_TIMESLICE_NS 10000000
85
Tiejun Chen321bc0b2013-08-02 09:43:09 +080086bool cpu_is_stopped(CPUState *cpu)
87{
88 return cpu->stopped || !runstate_is_running();
89}
90
Andreas Färbera98ae1d2013-05-26 23:21:08 +020091static bool cpu_thread_is_idle(CPUState *cpu)
Peter Maydellac873f12012-07-19 16:52:27 +010092{
Andreas Färberc64ca812012-05-03 02:11:45 +020093 if (cpu->stop || cpu->queued_work_first) {
Peter Maydellac873f12012-07-19 16:52:27 +010094 return false;
95 }
Tiejun Chen321bc0b2013-08-02 09:43:09 +080096 if (cpu_is_stopped(cpu)) {
Peter Maydellac873f12012-07-19 16:52:27 +010097 return true;
98 }
Andreas Färber8c2e1b02013-08-25 18:53:55 +020099 if (!cpu->halted || cpu_has_work(cpu) ||
Alexander Graf215e79c2013-04-24 22:24:12 +0200100 kvm_halt_in_kernel()) {
Peter Maydellac873f12012-07-19 16:52:27 +0100101 return false;
102 }
103 return true;
104}
105
106static bool all_cpu_threads_idle(void)
107{
Andreas Färber182735e2013-05-29 22:29:20 +0200108 CPUState *cpu;
Peter Maydellac873f12012-07-19 16:52:27 +0100109
Andreas Färberbdc44642013-06-24 23:50:24 +0200110 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +0200111 if (!cpu_thread_is_idle(cpu)) {
Peter Maydellac873f12012-07-19 16:52:27 +0100112 return false;
113 }
114 }
115 return true;
116}
117
Blue Swirl296af7c2010-03-29 19:23:50 +0000118/***********************************************************/
Paolo Bonzini946fb272011-09-12 13:57:37 +0200119/* guest cycle counter */
120
Paolo Bonzinia3270e12013-10-07 17:18:15 +0200121/* Protected by TimersState seqlock */
122
Victor CLEMENT5045e9d92015-05-29 17:14:04 +0200123static bool icount_sleep = true;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200124/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
125#define MAX_ICOUNT_SHIFT 10
Paolo Bonzinia3270e12013-10-07 17:18:15 +0200126
/* Global bookkeeping for the guest cycle counter and icount clocks.
 *
 * Locking discipline (three tiers):
 *  - Fields below "Protected by BQL" are only touched under the Big QEMU Lock.
 *  - vm_clock_seqlock + vm_clock_lock guard fields that are read outside the
 *    BQL and written from multiple threads; readers use the seqlock retry
 *    loop, writers take both.
 *  - qemu_icount is written only by the TCG thread (reads elsewhere go
 *    through atomic_read__nocheck under the seqlock).
 */
typedef struct TimersState {
    /* Protected by BQL. */
    int64_t cpu_ticks_prev;      /* last value returned by cpu_get_ticks(),
                                    used to clamp non-monotonic host ticks */
    int64_t cpu_ticks_offset;    /* accumulated offset applied to host ticks */

    /* Protect fields that can be respectively read outside the
     * BQL, and written from multiple threads.
     */
    QemuSeqLock vm_clock_seqlock;
    QemuSpin vm_clock_lock;

    int16_t cpu_ticks_enabled;   /* non-zero while the VM clock is running */

    /* Conversion factor from emulated instructions to virtual clock ticks. */
    int16_t icount_time_shift;

    /* Compensate for varying guest execution speed. */
    int64_t qemu_icount_bias;

    int64_t vm_clock_warp_start; /* -1 when no warp is in progress */
    int64_t cpu_clock_offset;

    /* Only written by TCG thread */
    int64_t qemu_icount;

    /* for adjusting icount */
    QEMUTimer *icount_rt_timer;
    QEMUTimer *icount_vm_timer;
    QEMUTimer *icount_warp_timer;
} TimersState;
157
Liu Ping Fand9cd4002013-07-21 08:43:00 +0000158static TimersState timers_state;
KONRAD Frederic8d4e9142017-02-23 18:29:08 +0000159bool mttcg_enabled;
160
161/*
162 * We default to false if we know other options have been enabled
163 * which are currently incompatible with MTTCG. Otherwise when each
164 * guest (target) has been updated to support:
165 * - atomic instructions
166 * - memory ordering primitives (barriers)
167 * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
168 *
169 * Once a guest architecture has been converted to the new primitives
170 * there are two remaining limitations to check.
171 *
172 * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
173 * - The host must have a stronger memory order than the guest
174 *
175 * It may be possible in future to support strong guests on weak hosts
176 * but that will require tagging all load/stores in a guest with their
177 * implicit memory order requirements which would likely slow things
178 * down a lot.
179 */
180
/* A guest may only run under MTTCG when the host's memory model is at
 * least as strong as the guest's: every ordering bit the guest requires
 * must be provided by the TCG backend.  When either default-MO macro is
 * missing we cannot prove compatibility, so conservatively say no.
 */
static bool check_tcg_memory_orders_compatible(void)
{
#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
    return !(TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO);
#else
    return false;
#endif
}
189
190static bool default_mttcg_enabled(void)
191{
Alex Bennée83fd9622017-02-27 17:09:01 +0000192 if (use_icount || TCG_OVERSIZED_GUEST) {
KONRAD Frederic8d4e9142017-02-23 18:29:08 +0000193 return false;
194 } else {
195#ifdef TARGET_SUPPORTS_MTTCG
196 return check_tcg_memory_orders_compatible();
197#else
198 return false;
199#endif
200 }
201}
202
/* Parse the -accel tcg,thread=single|multi option and set mttcg_enabled.
 *
 * @opts: the "tcg" accelerator option group
 * @errp: set on hard errors (oversized guest, icount, bad value);
 *        soft incompatibilities only produce warnings on stderr.
 *
 * With no explicit "thread" option, fall back to default_mttcg_enabled().
 */
void qemu_tcg_configure(QemuOpts *opts, Error **errp)
{
    const char *t = qemu_opt_get(opts, "thread");
    if (t) {
        if (strcmp(t, "multi") == 0) {
            /* Hard blockers first: these make MTTCG impossible. */
            if (TCG_OVERSIZED_GUEST) {
                error_setg(errp, "No MTTCG when guest word size > hosts");
            } else if (use_icount) {
                error_setg(errp, "No MTTCG when icount is enabled");
            } else {
#ifndef TARGET_SUPPORTS_MTTCG
                /* User forced MTTCG on an unconverted target: warn, allow. */
                error_report("Guest not yet converted to MTTCG - "
                             "you may get unexpected results");
#endif
                if (!check_tcg_memory_orders_compatible()) {
                    error_report("Guest expects a stronger memory ordering "
                                 "than the host provides");
                    error_printf("This may cause strange/hard to debug errors\n");
                }
                mttcg_enabled = true;
            }
        } else if (strcmp(t, "single") == 0) {
            mttcg_enabled = false;
        } else {
            error_setg(errp, "Invalid 'thread' setting %s", t);
        }
    } else {
        mttcg_enabled = default_mttcg_enabled();
    }
}
Paolo Bonzini946fb272011-09-12 13:57:37 +0200233
Alex Bennéee4cd9652017-03-31 16:09:42 +0100234/* The current number of executed instructions is based on what we
235 * originally budgeted minus the current state of the decrementing
236 * icount counters in extra/u16.low.
237 */
238static int64_t cpu_get_icount_executed(CPUState *cpu)
239{
240 return cpu->icount_budget - (cpu->icount_decr.u16.low + cpu->icount_extra);
241}
242
/*
 * Update the global shared timer_state.qemu_icount to take into
 * account executed instructions. This is done by the TCG vCPU
 * thread so the main-loop can see time has moved forward.
 *
 * Only the TCG thread writes qemu_icount; when the platform lacks
 * 64-bit atomics (CONFIG_ATOMIC64 unset) the write must additionally
 * be bracketed by the seqlock so cross-thread readers never observe
 * a torn value.
 */
void cpu_update_icount(CPUState *cpu)
{
    int64_t executed = cpu_get_icount_executed(cpu);
    cpu->icount_budget -= executed;

#ifndef CONFIG_ATOMIC64
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
#endif
    atomic_set__nocheck(&timers_state.qemu_icount,
                        timers_state.qemu_icount + executed);
#ifndef CONFIG_ATOMIC64
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
#endif
}
264
/* Return the raw (unshifted, unbiased) instruction count.
 * Caller must be inside a vm_clock_seqlock read or write section.
 *
 * If the current CPU is mid-execution, fold its progress into the
 * global count first so time appears to advance; reading icount from
 * a context that cannot do I/O is a fatal logic error.
 */
static int64_t cpu_get_icount_raw_locked(void)
{
    CPUState *cpu = current_cpu;

    if (cpu && cpu->running) {
        if (!cpu->can_do_io) {
            error_report("Bad icount read");
            exit(1);
        }
        /* Take into account what has run */
        cpu_update_icount(cpu);
    }
    /* The read is protected by the seqlock, so __nocheck is okay.  */
    return atomic_read__nocheck(&timers_state.qemu_icount);
}
280
281static int64_t cpu_get_icount_locked(void)
282{
283 int64_t icount = cpu_get_icount_raw_locked();
284 return atomic_read__nocheck(&timers_state.qemu_icount_bias) + cpu_icount_to_ns(icount);
285}
286
/* Lock-free public accessor for the raw instruction count: retry the
 * seqlock read section until no writer raced with us.
 */
int64_t cpu_get_icount_raw(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_raw_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}
299
300/* Return the virtual CPU time, based on the instruction counter. */
/* Return the virtual CPU time in ns, based on the instruction counter.
 * Lock-free: standard seqlock read-retry loop around the locked helper.
 */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}
313
KONRAD Frederic3f031312014-08-01 01:37:15 +0200314int64_t cpu_icount_to_ns(int64_t icount)
315{
Paolo Bonzinic1ff0732018-08-14 09:31:58 +0200316 return icount << atomic_read(&timers_state.icount_time_shift);
KONRAD Frederic3f031312014-08-01 01:37:15 +0200317}
318
Cao jind90f3cc2016-07-29 19:05:38 +0800319/* return the time elapsed in VM between vm_start and vm_stop. Unless
320 * icount is active, cpu_get_ticks() uses units of the host CPU cycle
321 * counter.
322 *
323 * Caller must hold the BQL
324 */
/* Return the time elapsed in VM between vm_start and vm_stop, in units
 * of the host CPU cycle counter (or icount ns when icount is active).
 *
 * Caller must hold the BQL: cpu_ticks_prev/cpu_ticks_offset are
 * BQL-protected and updated here.
 */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_host_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Note: non increasing ticks may happen if the host uses
           software suspend.  Fold the regression into the offset so the
           returned value never goes backwards. */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}
348
Liu Ping Fancb365642013-09-25 14:20:58 +0800349static int64_t cpu_get_clock_locked(void)
350{
Cao jin1d45cea2016-07-29 19:05:37 +0800351 int64_t time;
Liu Ping Fancb365642013-09-25 14:20:58 +0800352
Cao jin1d45cea2016-07-29 19:05:37 +0800353 time = timers_state.cpu_clock_offset;
Paolo Bonzini5f3e3102013-10-28 17:32:18 +0100354 if (timers_state.cpu_ticks_enabled) {
Cao jin1d45cea2016-07-29 19:05:37 +0800355 time += get_clock();
Liu Ping Fancb365642013-09-25 14:20:58 +0800356 }
357
Cao jin1d45cea2016-07-29 19:05:37 +0800358 return time;
Liu Ping Fancb365642013-09-25 14:20:58 +0800359}
360
Cao jind90f3cc2016-07-29 19:05:38 +0800361/* Return the monotonic time elapsed in VM, i.e.,
Peter Maydell8212ff82016-09-15 10:24:22 +0100362 * the time between vm_start and vm_stop
363 */
/* Return the monotonic time elapsed in VM, i.e.,
 * the time between vm_start and vm_stop.
 * Lock-free seqlock read-retry around the locked helper.
 */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}
376
Liu Ping Fancb365642013-09-25 14:20:58 +0800377/* enable cpu_get_ticks()
Cao jin3224e872016-07-08 18:31:37 +0800378 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
Liu Ping Fancb365642013-09-25 14:20:58 +0800379 */
/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 *
 * Subtracting the current host tick/clock values from the offsets makes
 * subsequent reads measure only the time elapsed while enabled.
 * Idempotent: a second call while already enabled is a no-op.
 */
void cpu_enable_ticks(void)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}
392
393/* disable cpu_get_ticks() : the clock is stopped. You must not call
Liu Ping Fancb365642013-09-25 14:20:58 +0800394 * cpu_get_ticks() after that.
Cao jin3224e872016-07-08 18:31:37 +0800395 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
Liu Ping Fancb365642013-09-25 14:20:58 +0800396 */
/* disable cpu_get_ticks() : the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 *
 * Freezes both offsets at their current effective values so the clocks
 * stand still until cpu_enable_ticks() resumes them.
 */
void cpu_disable_ticks(void)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_host_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}
409
410/* Correlation between real and virtual time is always going to be
411 fairly approximate, so ignore small variation.
412 When the guest is idle real and virtual time will be aligned in
413 the IO wait loop. */
Rutuja Shah73bcb242016-03-21 21:32:30 +0530414#define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)
Paolo Bonzini946fb272011-09-12 13:57:37 +0200415
/* Feedback loop keeping virtual (icount) time close to real time by
 * nudging icount_time_shift: grow it when the guest lags, shrink it
 * when the guest races ahead.  Finally recompute qemu_icount_bias so
 * the shift change does not cause a jump in QEMU_CLOCK_VIRTUAL.
 */
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex.  */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.
     * ICOUNT_WOBBLE gives hysteresis so small jitter does not flip the
     * shift back and forth.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && timers_state.icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        atomic_set(&timers_state.icount_time_shift,
                   timers_state.icount_time_shift - 1);
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && timers_state.icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        atomic_set(&timers_state.icount_time_shift,
                   timers_state.icount_time_shift + 1);
    }
    last_delta = delta;
    /* Keep cur_icount invariant across the shift change. */
    atomic_set__nocheck(&timers_state.qemu_icount_bias,
                        cur_icount - (timers_state.qemu_icount
                                      << timers_state.icount_time_shift));
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}
458
/* Timer callback: rearm the real-time adjustment timer for 1s from now
 * and run one adjustment step.
 */
static void icount_adjust_rt(void *opaque)
{
    timer_mod(timers_state.icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_adjust();
}
465
/* Timer callback: rearm the virtual-time adjustment timer 100ms of
 * QEMU_CLOCK_VIRTUAL ahead and run one adjustment step.
 */
static void icount_adjust_vm(void *opaque)
{
    timer_mod(timers_state.icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND / 10);
    icount_adjust();
}
473
474static int64_t qemu_icount_round(int64_t count)
475{
Paolo Bonzinic1ff0732018-08-14 09:31:58 +0200476 int shift = atomic_read(&timers_state.icount_time_shift);
477 return (count + (1 << shift) - 1) >> shift;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200478}
479
/* Apply a pending clock warp: advance qemu_icount_bias by the real time
 * that passed since vm_clock_warp_start, then clear the warp.  In
 * adaptive mode (use_icount == 2) the advance is capped so virtual time
 * never runs ahead of real time.
 */
static void icount_warp_rt(void)
{
    unsigned seq;
    int64_t warp_start;

    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    do {
        seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        warp_start = timers_state.vm_clock_warp_start;
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));

    if (warp_start == -1) {
        /* No warp pending. */
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (runstate_is_running()) {
        /* REPLAY_CLOCK makes the warp deterministic under record/replay. */
        int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT,
                                     cpu_get_clock_locked());
        int64_t warp_delta;

        warp_delta = clock - timers_state.vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = clock - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        atomic_set__nocheck(&timers_state.qemu_icount_bias,
                            timers_state.qemu_icount_bias + warp_delta);
    }
    timers_state.vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);

    /* The bias bump may have made virtual timers due; fire them. */
    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
525
/* icount_warp_timer callback: just apply the pending warp. */
static void icount_timer_cb(void *opaque)
{
    /* No need for a checkpoint because the timer already synchronizes
     * with CHECKPOINT_CLOCK_VIRTUAL_RT.
     */
    icount_warp_rt();
}
533
/* qtest-only: advance QEMU_CLOCK_VIRTUAL to @dest, firing every timer
 * deadline on the way (both the main timer lists and the main-loop
 * AioContext's virtual timer list).  Asserts if used outside qtest.
 */
void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    AioContext *aio_context;
    assert(qtest_enabled());
    aio_context = qemu_get_aio_context();
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        /* Step to the next timer deadline, but never past dest. */
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);

        seqlock_write_lock(&timers_state.vm_clock_seqlock,
                           &timers_state.vm_clock_lock);
        atomic_set__nocheck(&timers_state.qemu_icount_bias,
                            timers_state.qemu_icount_bias + warp);
        seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                             &timers_state.vm_clock_lock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
557
/* When all vCPUs are idle under icount, virtual time would stand still
 * and pending QEMU_CLOCK_VIRTUAL timers would never fire.  This function
 * arranges for virtual time to "warp" forward to the next timer deadline:
 * immediately (icount_sleep off) or after the equivalent real time has
 * elapsed, via icount_warp_timer (icount_sleep on).
 * No-op unless icount is active, the VM is running, replay allows it,
 * every vCPU is idle, and we are not under qtest.
 */
void qemu_start_warp_timer(void)
{
    int64_t clock;
    int64_t deadline;

    if (!use_icount) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    /* warp clock deterministically in record/replay mode */
    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
        return;
    }

    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        /* No virtual timer armed at all: nothing to warp towards. */
        static bool notified;
        if (!icount_sleep && !notified) {
            warn_report("icount sleep disabled and no active timers");
            notified = true;
        }
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         */
        if (!icount_sleep) {
            /*
             * We never let VCPUs sleep in no sleep icount mode.
             * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
             * to the next QEMU_CLOCK_VIRTUAL event and notify it.
             * It is useful when we want a deterministic execution time,
             * isolated from host latencies.
             */
            seqlock_write_lock(&timers_state.vm_clock_seqlock,
                               &timers_state.vm_clock_lock);
            atomic_set__nocheck(&timers_state.qemu_icount_bias,
                                timers_state.qemu_icount_bias + deadline);
            seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                                 &timers_state.vm_clock_lock);
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
        } else {
            /*
             * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
             * "real" time, (related to the time left until the next event) has
             * passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
             * This avoids that the warps are visible externally; for example,
             * you will not be sending network packets continuously instead of
             * every 100ms.
             */
            seqlock_write_lock(&timers_state.vm_clock_seqlock,
                               &timers_state.vm_clock_lock);
            if (timers_state.vm_clock_warp_start == -1
                || timers_state.vm_clock_warp_start > clock) {
                /* Record the earliest moment a warp became pending. */
                timers_state.vm_clock_warp_start = clock;
            }
            seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                                 &timers_state.vm_clock_lock);
            timer_mod_anticipate(timers_state.icount_warp_timer,
                                 clock + deadline);
        }
    } else if (deadline == 0) {
        /* A timer is already due: just kick the clock. */
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
647
Pavel Dovgalyuke76d1792016-03-10 14:56:09 +0300648static void qemu_account_warp_timer(void)
649{
650 if (!use_icount || !icount_sleep) {
651 return;
652 }
653
654 /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
655 * do not fire, so computing the deadline does not make sense.
656 */
657 if (!runstate_is_running()) {
658 return;
659 }
660
661 /* warp clock deterministically in record/replay mode */
662 if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
663 return;
664 }
665
Pavel Dovgalyukb39e3f32018-01-11 11:26:10 +0300666 timer_del(timers_state.icount_warp_timer);
Pavel Dovgalyuke76d1792016-03-10 14:56:09 +0300667 icount_warp_rt();
668}
669
KONRAD Fredericd09eae32014-08-01 01:37:10 +0200670static bool icount_state_needed(void *opaque)
671{
672 return use_icount;
673}
674
Pavel Dovgalyukb39e3f32018-01-11 11:26:10 +0300675static bool warp_timer_state_needed(void *opaque)
676{
677 TimersState *s = opaque;
678 return s->icount_warp_timer != NULL;
679}
680
681static bool adjust_timers_state_needed(void *opaque)
682{
683 TimersState *s = opaque;
684 return s->icount_rt_timer != NULL;
685}
686
687/*
688 * Subsection for warp timer migration is optional, because may not be created
689 */
690static const VMStateDescription icount_vmstate_warp_timer = {
691 .name = "timer/icount/warp_timer",
692 .version_id = 1,
693 .minimum_version_id = 1,
694 .needed = warp_timer_state_needed,
695 .fields = (VMStateField[]) {
696 VMSTATE_INT64(vm_clock_warp_start, TimersState),
697 VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
698 VMSTATE_END_OF_LIST()
699 }
700};
701
/* Optional subsection carrying the icount speed-adjustment timers
 * (only present when shift=auto created them, see configure_icount).
 */
static const VMStateDescription icount_vmstate_adjust_timers = {
    .name = "timer/icount/timers",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = adjust_timers_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
        VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};
713
KONRAD Fredericd09eae32014-08-01 01:37:10 +0200714/*
715 * This is a subsection for icount migration.
716 */
717static const VMStateDescription icount_vmstate_timers = {
718 .name = "timer/icount",
719 .version_id = 1,
720 .minimum_version_id = 1,
Juan Quintela5cd8cad2014-09-23 14:09:54 +0200721 .needed = icount_state_needed,
KONRAD Fredericd09eae32014-08-01 01:37:10 +0200722 .fields = (VMStateField[]) {
723 VMSTATE_INT64(qemu_icount_bias, TimersState),
724 VMSTATE_INT64(qemu_icount, TimersState),
725 VMSTATE_END_OF_LIST()
Pavel Dovgalyukb39e3f32018-01-11 11:26:10 +0300726 },
727 .subsections = (const VMStateDescription*[]) {
728 &icount_vmstate_warp_timer,
729 &icount_vmstate_adjust_timers,
730 NULL
KONRAD Fredericd09eae32014-08-01 01:37:10 +0200731 }
732};
733
Paolo Bonzini946fb272011-09-12 13:57:37 +0200734static const VMStateDescription vmstate_timers = {
735 .name = "timer",
736 .version_id = 2,
737 .minimum_version_id = 1,
Juan Quintela35d08452014-04-16 16:01:33 +0200738 .fields = (VMStateField[]) {
Paolo Bonzini946fb272011-09-12 13:57:37 +0200739 VMSTATE_INT64(cpu_ticks_offset, TimersState),
Paolo Bonzinic1ff0732018-08-14 09:31:58 +0200740 VMSTATE_UNUSED(8),
Paolo Bonzini946fb272011-09-12 13:57:37 +0200741 VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
742 VMSTATE_END_OF_LIST()
KONRAD Fredericd09eae32014-08-01 01:37:10 +0200743 },
Juan Quintela5cd8cad2014-09-23 14:09:54 +0200744 .subsections = (const VMStateDescription*[]) {
745 &icount_vmstate_timers,
746 NULL
Paolo Bonzini946fb272011-09-12 13:57:37 +0200747 }
748};
749
Paolo Bonzini14e6fe12016-10-31 10:36:08 +0100750static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
Jason J. Herne2adcc852015-09-08 13:12:33 -0400751{
Jason J. Herne2adcc852015-09-08 13:12:33 -0400752 double pct;
753 double throttle_ratio;
754 long sleeptime_ns;
755
756 if (!cpu_throttle_get_percentage()) {
757 return;
758 }
759
760 pct = (double)cpu_throttle_get_percentage()/100;
761 throttle_ratio = pct / (1 - pct);
762 sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS);
763
764 qemu_mutex_unlock_iothread();
Jason J. Herne2adcc852015-09-08 13:12:33 -0400765 g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */
766 qemu_mutex_lock_iothread();
Felipe Franciosi90bb0c02017-05-19 22:29:50 +0100767 atomic_set(&cpu->throttle_thread_scheduled, 0);
Jason J. Herne2adcc852015-09-08 13:12:33 -0400768}
769
770static void cpu_throttle_timer_tick(void *opaque)
771{
772 CPUState *cpu;
773 double pct;
774
775 /* Stop the timer if needed */
776 if (!cpu_throttle_get_percentage()) {
777 return;
778 }
779 CPU_FOREACH(cpu) {
780 if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
Paolo Bonzini14e6fe12016-10-31 10:36:08 +0100781 async_run_on_cpu(cpu, cpu_throttle_thread,
782 RUN_ON_CPU_NULL);
Jason J. Herne2adcc852015-09-08 13:12:33 -0400783 }
784 }
785
786 pct = (double)cpu_throttle_get_percentage()/100;
787 timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
788 CPU_THROTTLE_TIMESLICE_NS / (1-pct));
789}
790
791void cpu_throttle_set(int new_throttle_pct)
792{
793 /* Ensure throttle percentage is within valid range */
794 new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
795 new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);
796
797 atomic_set(&throttle_percentage, new_throttle_pct);
798
799 timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
800 CPU_THROTTLE_TIMESLICE_NS);
801}
802
/* Disable throttling; cpu_throttle_timer_tick sees 0 and stops re-arming. */
void cpu_throttle_stop(void)
{
    atomic_set(&throttle_percentage, 0);
}
807
/* True when a non-zero throttle percentage is currently configured. */
bool cpu_throttle_active(void)
{
    return (cpu_throttle_get_percentage() != 0);
}
812
/* Atomically read the current throttle percentage (0 = disabled). */
int cpu_throttle_get_percentage(void)
{
    return atomic_read(&throttle_percentage);
}
817
Pavel Dovgalyuk4603ea02014-09-01 09:34:49 +0400818void cpu_ticks_init(void)
819{
Emilio G. Cotaccdb3c12016-06-08 14:55:20 -0400820 seqlock_init(&timers_state.vm_clock_seqlock);
Pavel Dovgalyuk4603ea02014-09-01 09:34:49 +0400821 vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
Jason J. Herne2adcc852015-09-08 13:12:33 -0400822 throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
823 cpu_throttle_timer_tick, NULL);
Pavel Dovgalyuk4603ea02014-09-01 09:34:49 +0400824}
825
Sebastian Tanase1ad95802014-07-25 11:56:28 +0200826void configure_icount(QemuOpts *opts, Error **errp)
Paolo Bonzini946fb272011-09-12 13:57:37 +0200827{
Sebastian Tanase1ad95802014-07-25 11:56:28 +0200828 const char *option;
Sebastian Tanasea8bfac32014-07-25 11:56:29 +0200829 char *rem_str = NULL;
Sebastian Tanase1ad95802014-07-25 11:56:28 +0200830
Sebastian Tanase1ad95802014-07-25 11:56:28 +0200831 option = qemu_opt_get(opts, "shift");
Paolo Bonzini946fb272011-09-12 13:57:37 +0200832 if (!option) {
Sebastian Tanasea8bfac32014-07-25 11:56:29 +0200833 if (qemu_opt_get(opts, "align") != NULL) {
834 error_setg(errp, "Please specify shift option when using align");
835 }
Paolo Bonzini946fb272011-09-12 13:57:37 +0200836 return;
837 }
Victor CLEMENTf1f4b572015-05-29 17:14:05 +0200838
839 icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
Victor CLEMENT5045e9d92015-05-29 17:14:04 +0200840 if (icount_sleep) {
Pavel Dovgalyukb39e3f32018-01-11 11:26:10 +0300841 timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
Pavel Dovgalyuke76d1792016-03-10 14:56:09 +0300842 icount_timer_cb, NULL);
Victor CLEMENT5045e9d92015-05-29 17:14:04 +0200843 }
Victor CLEMENTf1f4b572015-05-29 17:14:05 +0200844
Sebastian Tanasea8bfac32014-07-25 11:56:29 +0200845 icount_align_option = qemu_opt_get_bool(opts, "align", false);
Victor CLEMENTf1f4b572015-05-29 17:14:05 +0200846
847 if (icount_align_option && !icount_sleep) {
Pranith Kumar778d9f92016-02-26 10:16:51 -0500848 error_setg(errp, "align=on and sleep=off are incompatible");
Victor CLEMENTf1f4b572015-05-29 17:14:05 +0200849 }
Paolo Bonzini946fb272011-09-12 13:57:37 +0200850 if (strcmp(option, "auto") != 0) {
Sebastian Tanasea8bfac32014-07-25 11:56:29 +0200851 errno = 0;
Paolo Bonzinic1ff0732018-08-14 09:31:58 +0200852 timers_state.icount_time_shift = strtol(option, &rem_str, 0);
Sebastian Tanasea8bfac32014-07-25 11:56:29 +0200853 if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
854 error_setg(errp, "icount: Invalid shift value");
855 }
Paolo Bonzini946fb272011-09-12 13:57:37 +0200856 use_icount = 1;
857 return;
Sebastian Tanasea8bfac32014-07-25 11:56:29 +0200858 } else if (icount_align_option) {
859 error_setg(errp, "shift=auto and align=on are incompatible");
Victor CLEMENTf1f4b572015-05-29 17:14:05 +0200860 } else if (!icount_sleep) {
Pranith Kumar778d9f92016-02-26 10:16:51 -0500861 error_setg(errp, "shift=auto and sleep=off are incompatible");
Paolo Bonzini946fb272011-09-12 13:57:37 +0200862 }
863
864 use_icount = 2;
865
866 /* 125MIPS seems a reasonable initial guess at the guest speed.
867 It will be corrected fairly quickly anyway. */
Paolo Bonzinic1ff0732018-08-14 09:31:58 +0200868 timers_state.icount_time_shift = 3;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200869
870 /* Have both realtime and virtual time triggers for speed adjustment.
871 The realtime trigger catches emulated time passing too slowly,
872 the virtual time trigger catches emulated time passing too fast.
873 Realtime triggers occur even when idle, so use them less frequently
874 than VM triggers. */
Pavel Dovgalyukb39e3f32018-01-11 11:26:10 +0300875 timers_state.vm_clock_warp_start = -1;
876 timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
Pavel Dovgalyukbf2a7dd2014-11-26 13:40:55 +0300877 icount_adjust_rt, NULL);
Pavel Dovgalyukb39e3f32018-01-11 11:26:10 +0300878 timer_mod(timers_state.icount_rt_timer,
Pavel Dovgalyukbf2a7dd2014-11-26 13:40:55 +0300879 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
Pavel Dovgalyukb39e3f32018-01-11 11:26:10 +0300880 timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
Alex Bligh40daca52013-08-21 16:03:02 +0100881 icount_adjust_vm, NULL);
Pavel Dovgalyukb39e3f32018-01-11 11:26:10 +0300882 timer_mod(timers_state.icount_vm_timer,
Alex Bligh40daca52013-08-21 16:03:02 +0100883 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
Rutuja Shah73bcb242016-03-21 21:32:30 +0530884 NANOSECONDS_PER_SECOND / 10);
Paolo Bonzini946fb272011-09-12 13:57:37 +0200885}
886
887/***********************************************************/
Alex Bennée65467062017-02-23 18:29:09 +0000888/* TCG vCPU kick timer
889 *
890 * The kick timer is responsible for moving single threaded vCPU
891 * emulation on to the next vCPU. If more than one vCPU is running a
892 * timer event with force a cpu->exit so the next vCPU can get
893 * scheduled.
894 *
895 * The timer is removed if all vCPUs are idle and restarted again once
896 * idleness is complete.
897 */
898
899static QEMUTimer *tcg_kick_vcpu_timer;
Alex Bennée791158d2017-02-23 18:29:10 +0000900static CPUState *tcg_current_rr_cpu;
Alex Bennée65467062017-02-23 18:29:09 +0000901
902#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
903
904static inline int64_t qemu_tcg_next_kick(void)
905{
906 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
907}
908
Alex Bennée791158d2017-02-23 18:29:10 +0000909/* Kick the currently round-robin scheduled vCPU */
910static void qemu_cpu_kick_rr_cpu(void)
911{
912 CPUState *cpu;
Alex Bennée791158d2017-02-23 18:29:10 +0000913 do {
914 cpu = atomic_mb_read(&tcg_current_rr_cpu);
915 if (cpu) {
916 cpu_exit(cpu);
917 }
918 } while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
919}
920
Paolo Bonzini6b8f0182017-03-02 19:56:40 +0100921static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
922{
923}
924
Paolo Bonzini3f53bc62017-03-03 11:50:29 +0100925void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
926{
Paolo Bonzini6b8f0182017-03-02 19:56:40 +0100927 if (!use_icount || type != QEMU_CLOCK_VIRTUAL) {
928 qemu_notify_event();
929 return;
930 }
931
Peter Maydellc52e7132018-04-10 13:02:25 +0100932 if (qemu_in_vcpu_thread()) {
933 /* A CPU is currently running; kick it back out to the
934 * tcg_cpu_exec() loop so it will recalculate its
935 * icount deadline immediately.
936 */
937 qemu_cpu_kick(current_cpu);
938 } else if (first_cpu) {
Paolo Bonzini6b8f0182017-03-02 19:56:40 +0100939 /* qemu_cpu_kick is not enough to kick a halted CPU out of
940 * qemu_tcg_wait_io_event. async_run_on_cpu, instead,
941 * causes cpu_thread_is_idle to return false. This way,
942 * handle_icount_deadline can run.
Peter Maydellc52e7132018-04-10 13:02:25 +0100943 * If we have no CPUs at all for some reason, we don't
944 * need to do anything.
Paolo Bonzini6b8f0182017-03-02 19:56:40 +0100945 */
946 async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
947 }
Paolo Bonzini3f53bc62017-03-03 11:50:29 +0100948}
949
Alex Bennée65467062017-02-23 18:29:09 +0000950static void kick_tcg_thread(void *opaque)
951{
952 timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
Alex Bennée791158d2017-02-23 18:29:10 +0000953 qemu_cpu_kick_rr_cpu();
Alex Bennée65467062017-02-23 18:29:09 +0000954}
955
956static void start_tcg_kick_timer(void)
957{
Paolo Bonzinidb08b682018-01-11 13:53:12 +0100958 assert(!mttcg_enabled);
959 if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
Alex Bennée65467062017-02-23 18:29:09 +0000960 tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
961 kick_tcg_thread, NULL);
962 timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
963 }
964}
965
966static void stop_tcg_kick_timer(void)
967{
Paolo Bonzinidb08b682018-01-11 13:53:12 +0100968 assert(!mttcg_enabled);
Alex Bennée65467062017-02-23 18:29:09 +0000969 if (tcg_kick_vcpu_timer) {
970 timer_del(tcg_kick_vcpu_timer);
971 tcg_kick_vcpu_timer = NULL;
972 }
973}
974
Alex Bennée65467062017-02-23 18:29:09 +0000975/***********************************************************/
Blue Swirl296af7c2010-03-29 19:23:50 +0000976void hw_error(const char *fmt, ...)
977{
978 va_list ap;
Andreas Färber55e5c282012-12-17 06:18:02 +0100979 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +0000980
981 va_start(ap, fmt);
982 fprintf(stderr, "qemu: hardware error: ");
983 vfprintf(stderr, fmt, ap);
984 fprintf(stderr, "\n");
Andreas Färberbdc44642013-06-24 23:50:24 +0200985 CPU_FOREACH(cpu) {
Andreas Färber55e5c282012-12-17 06:18:02 +0100986 fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
Andreas Färber878096e2013-05-27 01:33:50 +0200987 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
Blue Swirl296af7c2010-03-29 19:23:50 +0000988 }
989 va_end(ap);
990 abort();
991}
992
/* Pull the current register state of every CPU from the accelerator. */
void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
        /* TODO: move to cpu_synchronize_state() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_state(cpu);
        }
    }
}
1005
/* Push every CPU's state back to the accelerator after a system reset. */
void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
        /* TODO: move to cpu_synchronize_post_reset() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_post_reset(cpu);
        }
    }
}
1018
/* Push every CPU's state to the accelerator after machine init / loadvm. */
void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
        /* TODO: move to cpu_synchronize_post_init() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_post_init(cpu);
        }
    }
}
1031
David Gibson75e972d2017-05-26 14:46:28 +10001032void cpu_synchronize_all_pre_loadvm(void)
1033{
1034 CPUState *cpu;
1035
1036 CPU_FOREACH(cpu) {
1037 cpu_synchronize_pre_loadvm(cpu);
1038 }
1039}
1040
Stefan Hajnoczi4486e892018-03-07 14:42:05 +00001041static int do_vm_stop(RunState state, bool send_stop)
Blue Swirl296af7c2010-03-29 19:23:50 +00001042{
Kevin Wolf56983462013-07-05 13:49:54 +02001043 int ret = 0;
1044
Luiz Capitulino13548692011-07-29 15:36:43 -03001045 if (runstate_is_running()) {
Blue Swirl296af7c2010-03-29 19:23:50 +00001046 cpu_disable_ticks();
Blue Swirl296af7c2010-03-29 19:23:50 +00001047 pause_all_vcpus();
Luiz Capitulinof5bbfba2011-07-29 15:04:45 -03001048 runstate_set(state);
Luiz Capitulino1dfb4dd2011-07-29 14:26:33 -03001049 vm_state_notify(0, state);
Stefan Hajnoczi4486e892018-03-07 14:42:05 +00001050 if (send_stop) {
1051 qapi_event_send_stop(&error_abort);
1052 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001053 }
Kevin Wolf56983462013-07-05 13:49:54 +02001054
Kevin Wolf594a45c2013-07-18 14:52:19 +02001055 bdrv_drain_all();
Pavel Dovgalyuk6d0ceb82016-09-26 11:08:16 +03001056 replay_disable_events();
John Snow22af08e2016-09-22 21:45:51 -04001057 ret = bdrv_flush_all();
Kevin Wolf594a45c2013-07-18 14:52:19 +02001058
Kevin Wolf56983462013-07-05 13:49:54 +02001059 return ret;
Blue Swirl296af7c2010-03-29 19:23:50 +00001060}
1061
Stefan Hajnoczi4486e892018-03-07 14:42:05 +00001062/* Special vm_stop() variant for terminating the process. Historically clients
1063 * did not expect a QMP STOP event and so we need to retain compatibility.
1064 */
1065int vm_shutdown(void)
1066{
1067 return do_vm_stop(RUN_STATE_SHUTDOWN, false);
1068}
1069
Andreas Färbera1fcaa72012-05-02 23:42:26 +02001070static bool cpu_can_run(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001071{
Andreas Färber4fdeee72012-05-02 23:10:09 +02001072 if (cpu->stop) {
Andreas Färbera1fcaa72012-05-02 23:42:26 +02001073 return false;
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001074 }
Tiejun Chen321bc0b2013-08-02 09:43:09 +08001075 if (cpu_is_stopped(cpu)) {
Andreas Färbera1fcaa72012-05-02 23:42:26 +02001076 return false;
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001077 }
Andreas Färbera1fcaa72012-05-02 23:42:26 +02001078 return true;
Blue Swirl296af7c2010-03-29 19:23:50 +00001079}
1080
Andreas Färber91325042013-05-27 02:07:49 +02001081static void cpu_handle_guest_debug(CPUState *cpu)
Jan Kiszka3c638d02010-06-25 16:56:56 +02001082{
Andreas Färber64f6b342013-05-27 02:06:09 +02001083 gdb_set_stop_cpu(cpu);
Jan Kiszka8cf71712011-02-07 12:19:16 +01001084 qemu_system_debug_request();
Andreas Färberf324e762012-05-02 23:26:21 +02001085 cpu->stopped = true;
Jan Kiszka3c638d02010-06-25 16:56:56 +02001086}
1087
Jan Kiszka6d9cb732011-02-01 22:15:58 +01001088#ifdef CONFIG_LINUX
/* Restore the default SIGBUS disposition and re-raise the signal so the
 * process dies with the original signal; abort if that somehow fails.
 */
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        /* Unblock SIGBUS so the pending re-raised signal is delivered. */
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}
1105
Paolo Bonzinid98d4072017-02-08 13:22:12 +01001106static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx)
Jan Kiszka6d9cb732011-02-01 22:15:58 +01001107{
Paolo Bonzinia16fc072017-02-09 09:50:02 +01001108 if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) {
1109 sigbus_reraise();
1110 }
1111
Paolo Bonzini2ae41db2017-02-08 12:48:54 +01001112 if (current_cpu) {
1113 /* Called asynchronously in VCPU thread. */
1114 if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) {
1115 sigbus_reraise();
1116 }
1117 } else {
1118 /* Called synchronously (via signalfd) in main thread. */
1119 if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) {
1120 sigbus_reraise();
1121 }
Jan Kiszka6d9cb732011-02-01 22:15:58 +01001122 }
1123}
1124
/* Install the SIGBUS handler and opt in to early machine-check delivery
 * for this process (PR_MCE_KILL_EARLY).
 */
static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
Paolo Bonzinia16fc072017-02-09 09:50:02 +01001136#else /* !CONFIG_LINUX */
1137static void qemu_init_sigbus(void)
1138{
1139}
Paolo Bonzinia16fc072017-02-09 09:50:02 +01001140#endif /* !CONFIG_LINUX */
Blue Swirl296af7c2010-03-29 19:23:50 +00001141
Stefan Weilb2532d82012-09-27 07:41:42 +02001142static QemuMutex qemu_global_mutex;
Blue Swirl296af7c2010-03-29 19:23:50 +00001143
1144static QemuThread io_thread;
1145
Blue Swirl296af7c2010-03-29 19:23:50 +00001146/* cpu creation */
1147static QemuCond qemu_cpu_cond;
1148/* system init */
Blue Swirl296af7c2010-03-29 19:23:50 +00001149static QemuCond qemu_pause_cond;
1150
Paolo Bonzinid3b12f52011-09-13 10:30:52 +02001151void qemu_init_cpu_loop(void)
Blue Swirl296af7c2010-03-29 19:23:50 +00001152{
Jan Kiszka6d9cb732011-02-01 22:15:58 +01001153 qemu_init_sigbus();
Anthony Liguoried945922011-02-08 18:18:18 +01001154 qemu_cond_init(&qemu_cpu_cond);
Anthony Liguoried945922011-02-08 18:18:18 +01001155 qemu_cond_init(&qemu_pause_cond);
Blue Swirl296af7c2010-03-29 19:23:50 +00001156 qemu_mutex_init(&qemu_global_mutex);
Blue Swirl296af7c2010-03-29 19:23:50 +00001157
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001158 qemu_thread_get_self(&io_thread);
Blue Swirl296af7c2010-03-29 19:23:50 +00001159}
1160
Paolo Bonzini14e6fe12016-10-31 10:36:08 +01001161void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
Marcelo Tosattie82bcec2010-05-04 09:45:22 -03001162{
Sergey Fedorovd148d902016-08-29 09:51:00 +02001163 do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
Chegu Vinod3c022702013-06-24 03:49:41 -06001164}
1165
Gu Zheng4c055ab2016-05-12 09:18:13 +05301166static void qemu_kvm_destroy_vcpu(CPUState *cpu)
1167{
1168 if (kvm_destroy_vcpu(cpu) < 0) {
1169 error_report("kvm_destroy_vcpu failed");
1170 exit(EXIT_FAILURE);
1171 }
1172}
1173
/* TCG vCPUs have no accelerator-side state to release. */
static void qemu_tcg_destroy_vcpu(CPUState *cpu)
{
}
1177
David Hildenbrandebd05fe2017-11-29 20:12:15 +01001178static void qemu_cpu_stop(CPUState *cpu, bool exit)
1179{
1180 g_assert(qemu_cpu_is_self(cpu));
1181 cpu->stop = false;
1182 cpu->stopped = true;
1183 if (exit) {
1184 cpu_exit(cpu);
1185 }
1186 qemu_cond_broadcast(&qemu_pause_cond);
1187}
1188
Andreas Färber509a0d72012-05-03 02:18:09 +02001189static void qemu_wait_io_event_common(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001190{
Alex Bennée37257942017-02-23 18:29:14 +00001191 atomic_mb_set(&cpu->thread_kicked, false);
Andreas Färber4fdeee72012-05-02 23:10:09 +02001192 if (cpu->stop) {
David Hildenbrandebd05fe2017-11-29 20:12:15 +01001193 qemu_cpu_stop(cpu, false);
Blue Swirl296af7c2010-03-29 19:23:50 +00001194 }
Sergey Fedorova5403c62016-08-02 18:27:36 +01001195 process_queued_cpu_work(cpu);
Alex Bennée37257942017-02-23 18:29:14 +00001196}
1197
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001198static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
Alex Bennée37257942017-02-23 18:29:14 +00001199{
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001200 while (all_cpu_threads_idle()) {
Alex Bennée65467062017-02-23 18:29:09 +00001201 stop_tcg_kick_timer();
KONRAD Fredericd5f8d612015-08-10 17:27:06 +02001202 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
Jan Kiszka16400322011-02-09 16:29:37 +01001203 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001204
Alex Bennée65467062017-02-23 18:29:09 +00001205 start_tcg_kick_timer();
1206
Alex Bennée37257942017-02-23 18:29:14 +00001207 qemu_wait_io_event_common(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001208}
1209
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001210static void qemu_wait_io_event(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001211{
Andreas Färbera98ae1d2013-05-26 23:21:08 +02001212 while (cpu_thread_is_idle(cpu)) {
Andreas Färberf5c121b2012-05-03 01:22:49 +02001213 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
Jan Kiszka16400322011-02-09 16:29:37 +01001214 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001215
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001216#ifdef _WIN32
1217 /* Eat dummy APC queued by qemu_cpu_kick_thread. */
1218 if (!tcg_enabled()) {
1219 SleepEx(0, TRUE);
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05001220 }
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001221#endif
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05001222 qemu_wait_io_event_common(cpu);
1223}
1224
Jan Kiszka7e97cd82011-02-07 12:19:12 +01001225static void *qemu_kvm_cpu_thread_fn(void *arg)
Blue Swirl296af7c2010-03-29 19:23:50 +00001226{
Andreas Färber48a106b2013-05-27 02:20:39 +02001227 CPUState *cpu = arg;
Jan Kiszka84b49152011-02-01 22:15:50 +01001228 int r;
Blue Swirl296af7c2010-03-29 19:23:50 +00001229
Paolo Bonziniab28bd22015-07-09 08:55:38 +02001230 rcu_register_thread();
1231
Paolo Bonzini2e7f7a32015-06-18 18:47:18 +02001232 qemu_mutex_lock_iothread();
Andreas Färber814e6122012-05-02 17:00:37 +02001233 qemu_thread_get_self(cpu->thread);
Andreas Färber9f09e182012-05-03 06:59:07 +02001234 cpu->thread_id = qemu_get_thread_id();
Pavel Dovgalyuk626cf8f2014-12-08 10:53:17 +03001235 cpu->can_do_io = 1;
Andreas Färber4917cf42013-05-27 05:17:50 +02001236 current_cpu = cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001237
Andreas Färber504134d2012-12-17 06:38:45 +01001238 r = kvm_init_vcpu(cpu);
Jan Kiszka84b49152011-02-01 22:15:50 +01001239 if (r < 0) {
Alistair Francis493d89b2018-02-03 09:43:14 +01001240 error_report("kvm_init_vcpu failed: %s", strerror(-r));
Jan Kiszka84b49152011-02-01 22:15:50 +01001241 exit(1);
1242 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001243
Paolo Bonzini18268b62017-02-09 09:41:14 +01001244 kvm_init_cpu_signals(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001245
1246 /* signal CPU creation */
Andreas Färber61a46212012-05-02 22:49:36 +02001247 cpu->created = true;
Blue Swirl296af7c2010-03-29 19:23:50 +00001248 qemu_cond_signal(&qemu_cpu_cond);
1249
Gu Zheng4c055ab2016-05-12 09:18:13 +05301250 do {
Andreas Färbera1fcaa72012-05-02 23:42:26 +02001251 if (cpu_can_run(cpu)) {
Andreas Färber1458c362013-05-26 23:46:55 +02001252 r = kvm_cpu_exec(cpu);
Jan Kiszka83f338f2011-02-07 12:19:17 +01001253 if (r == EXCP_DEBUG) {
Andreas Färber91325042013-05-27 02:07:49 +02001254 cpu_handle_guest_debug(cpu);
Jan Kiszka83f338f2011-02-07 12:19:17 +01001255 }
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001256 }
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001257 qemu_wait_io_event(cpu);
Gu Zheng4c055ab2016-05-12 09:18:13 +05301258 } while (!cpu->unplug || cpu_can_run(cpu));
Blue Swirl296af7c2010-03-29 19:23:50 +00001259
Gu Zheng4c055ab2016-05-12 09:18:13 +05301260 qemu_kvm_destroy_vcpu(cpu);
Bharata B Rao2c579042016-05-12 09:18:14 +05301261 cpu->created = false;
1262 qemu_cond_signal(&qemu_cpu_cond);
Gu Zheng4c055ab2016-05-12 09:18:13 +05301263 qemu_mutex_unlock_iothread();
Paolo Bonzini57615ed2018-01-30 11:04:36 -05001264 rcu_unregister_thread();
Blue Swirl296af7c2010-03-29 19:23:50 +00001265 return NULL;
1266}
1267
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001268static void *qemu_dummy_cpu_thread_fn(void *arg)
1269{
1270#ifdef _WIN32
Alistair Francis493d89b2018-02-03 09:43:14 +01001271 error_report("qtest is not supported under Windows");
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001272 exit(1);
1273#else
Andreas Färber10a90212013-05-27 02:24:35 +02001274 CPUState *cpu = arg;
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001275 sigset_t waitset;
1276 int r;
1277
Paolo Bonziniab28bd22015-07-09 08:55:38 +02001278 rcu_register_thread();
1279
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001280 qemu_mutex_lock_iothread();
Andreas Färber814e6122012-05-02 17:00:37 +02001281 qemu_thread_get_self(cpu->thread);
Andreas Färber9f09e182012-05-03 06:59:07 +02001282 cpu->thread_id = qemu_get_thread_id();
Pavel Dovgalyuk626cf8f2014-12-08 10:53:17 +03001283 cpu->can_do_io = 1;
Alex Bennée37257942017-02-23 18:29:14 +00001284 current_cpu = cpu;
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001285
1286 sigemptyset(&waitset);
1287 sigaddset(&waitset, SIG_IPI);
1288
1289 /* signal CPU creation */
Andreas Färber61a46212012-05-02 22:49:36 +02001290 cpu->created = true;
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001291 qemu_cond_signal(&qemu_cpu_cond);
1292
Paolo Bonzinid2831ab2018-01-30 11:04:53 -05001293 do {
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001294 qemu_mutex_unlock_iothread();
1295 do {
1296 int sig;
1297 r = sigwait(&waitset, &sig);
1298 } while (r == -1 && (errno == EAGAIN || errno == EINTR));
1299 if (r == -1) {
1300 perror("sigwait");
1301 exit(1);
1302 }
1303 qemu_mutex_lock_iothread();
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001304 qemu_wait_io_event(cpu);
Paolo Bonzinid2831ab2018-01-30 11:04:53 -05001305 } while (!cpu->unplug);
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001306
Paolo Bonzinid2831ab2018-01-30 11:04:53 -05001307 rcu_unregister_thread();
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001308 return NULL;
1309#endif
1310}
1311
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001312static int64_t tcg_get_icount_limit(void)
1313{
1314 int64_t deadline;
1315
1316 if (replay_mode != REPLAY_MODE_PLAY) {
1317 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1318
1319 /* Maintain prior (possibly buggy) behaviour where if no deadline
1320 * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
1321 * INT32_MAX nanoseconds ahead, we still use INT32_MAX
1322 * nanoseconds.
1323 */
1324 if ((deadline < 0) || (deadline > INT32_MAX)) {
1325 deadline = INT32_MAX;
1326 }
1327
1328 return qemu_icount_round(deadline);
1329 } else {
1330 return replay_get_instructions();
1331 }
1332}
1333
Alex Bennée12e97002016-10-27 16:10:14 +01001334static void handle_icount_deadline(void)
1335{
Paolo Bonzini6b8f0182017-03-02 19:56:40 +01001336 assert(qemu_in_vcpu_thread());
Alex Bennée12e97002016-10-27 16:10:14 +01001337 if (use_icount) {
1338 int64_t deadline =
1339 qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1340
1341 if (deadline == 0) {
Paolo Bonzini6b8f0182017-03-02 19:56:40 +01001342 /* Wake up other AioContexts. */
Alex Bennée12e97002016-10-27 16:10:14 +01001343 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
Paolo Bonzini6b8f0182017-03-02 19:56:40 +01001344 qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
Alex Bennée12e97002016-10-27 16:10:14 +01001345 }
1346 }
1347}
1348
Alex Bennée05248382017-03-29 16:46:59 +01001349static void prepare_icount_for_run(CPUState *cpu)
1350{
1351 if (use_icount) {
Alex Bennéeeda5f7c2017-04-05 12:35:48 +01001352 int insns_left;
Alex Bennée05248382017-03-29 16:46:59 +01001353
1354 /* These should always be cleared by process_icount_data after
1355 * each vCPU execution. However u16.high can be raised
1356 * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
1357 */
1358 g_assert(cpu->icount_decr.u16.low == 0);
1359 g_assert(cpu->icount_extra == 0);
1360
Alex Bennéeeda5f7c2017-04-05 12:35:48 +01001361 cpu->icount_budget = tcg_get_icount_limit();
1362 insns_left = MIN(0xffff, cpu->icount_budget);
1363 cpu->icount_decr.u16.low = insns_left;
1364 cpu->icount_extra = cpu->icount_budget - insns_left;
Alex Bennéed759c952018-02-27 12:52:48 +03001365
1366 replay_mutex_lock();
Alex Bennée05248382017-03-29 16:46:59 +01001367 }
1368}
1369
/* Finish one TCG execution slice: fold the executed-instruction count
 * into the global icount, clear the per-CPU budget fields, record the
 * instructions for record/replay, and release the replay mutex taken in
 * prepare_icount_for_run().
 */
static void process_icount_data(CPUState *cpu)
{
    if (use_icount) {
        /* Account for executed instructions */
        cpu_update_icount(cpu);

        /* Reset the counters */
        cpu->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        cpu->icount_budget = 0;

        replay_account_executed_instructions();

        replay_mutex_unlock();
    }
}
1386
1387
/* Execute guest code on @cpu via TCG until it exits.
 *
 * Returns the cpu_exec() exit reason (e.g. EXCP_DEBUG, EXCP_HALTED,
 * EXCP_ATOMIC).  When CONFIG_PROFILER is set, the wall-clock time
 * spent inside cpu_exec() is accumulated into the global tcg_time.
 */
static int tcg_cpu_exec(CPUState *cpu)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

    assert(tcg_enabled());
#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    cpu_exec_start(cpu);
    ret = cpu_exec(cpu);
    cpu_exec_end(cpu);
#ifdef CONFIG_PROFILER
    tcg_time += profile_getclock() - ti;
#endif
    return ret;
}
1407
/* Destroy any remaining vCPUs which have been unplugged and have
 * finished running
 */
static void deal_with_unplugged_cpus(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->unplug && !cpu_can_run(cpu)) {
            qemu_tcg_destroy_vcpu(cpu);
            cpu->created = false;
            /* wake anyone waiting on qemu_cpu_cond for ->created to change */
            qemu_cond_signal(&qemu_cpu_cond);
            /* at most one vCPU is destroyed per call */
            break;
        }
    }
}
Jan Kiszkabdb7ca62011-09-26 09:40:39 +02001424
Alex Bennée65467062017-02-23 18:29:09 +00001425/* Single-threaded TCG
1426 *
1427 * In the single-threaded case each vCPU is simulated in turn. If
1428 * there is more than a single vCPU we create a simple timer to kick
1429 * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
1430 * This is done explicitly rather than relying on side-effects
1431 * elsewhere.
1432 */
1433
/* Round-robin (single-threaded) TCG vCPU thread function.
 *
 * One host thread simulates all vCPUs in turn.  @arg is the first vCPU
 * created; later vCPUs share this thread and its halt condition
 * variable (see qemu_tcg_init_vcpu()).
 */
static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    assert(tcg_enabled());
    rcu_register_thread();
    tcg_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->created = true;
    cpu->can_do_io = 1;
    /* tell qemu_init_vcpu() that cpu->created is now true */
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            current_cpu = cpu;
            qemu_wait_io_event_common(cpu);
        }
    }

    start_tcg_kick_timer();

    cpu = first_cpu;

    /* process any pending work */
    cpu->exit_request = 1;

    while (1) {
        /* replay lock is taken with the BQL dropped, then the BQL retaken */
        qemu_mutex_unlock_iothread();
        replay_mutex_lock();
        qemu_mutex_lock_iothread();
        /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
        qemu_account_warp_timer();

        /* Run the timers here.  This is much more efficient than
         * waking up the I/O thread and waiting for completion.
         */
        handle_icount_deadline();

        replay_mutex_unlock();

        if (!cpu) {
            cpu = first_cpu;
        }

        /* round-robin: run each vCPU until one has queued work or an
         * exit request, or a run ends in debug/atomic/stop handling */
        while (cpu && !cpu->queued_work_first && !cpu->exit_request) {

            atomic_mb_set(&tcg_current_rr_cpu, cpu);
            current_cpu = cpu;

            qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                              (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

            if (cpu_can_run(cpu)) {
                int r;

                /* guest code runs without the BQL */
                qemu_mutex_unlock_iothread();
                prepare_icount_for_run(cpu);

                r = tcg_cpu_exec(cpu);

                process_icount_data(cpu);
                qemu_mutex_lock_iothread();

                if (r == EXCP_DEBUG) {
                    cpu_handle_guest_debug(cpu);
                    break;
                } else if (r == EXCP_ATOMIC) {
                    /* execute one instruction exclusively, BQL dropped */
                    qemu_mutex_unlock_iothread();
                    cpu_exec_step_atomic(cpu);
                    qemu_mutex_lock_iothread();
                    break;
                }
            } else if (cpu->stop) {
                if (cpu->unplug) {
                    /* advance past the unplugged CPU before stopping */
                    cpu = CPU_NEXT(cpu);
                }
                break;
            }

            cpu = CPU_NEXT(cpu);
        } /* while (cpu && !cpu->exit_request).. */

        /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
        atomic_set(&tcg_current_rr_cpu, NULL);

        if (cpu && cpu->exit_request) {
            atomic_mb_set(&cpu->exit_request, 0);
        }

        qemu_tcg_rr_wait_io_event(cpu ? cpu : first_cpu);
        deal_with_unplugged_cpus();
    }

    /* not reached: the loop above never terminates */
    rcu_unregister_thread();
    return NULL;
}
1538
/* HAX (Intel HAXM accelerator) vCPU thread function: one thread per
 * vCPU, looping until the CPU is unplugged and can no longer run.
 */
static void *qemu_hax_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    rcu_register_thread();
    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->created = true;
    cpu->halted = 0;
    current_cpu = cpu;

    hax_init_vcpu(cpu);
    /* signal CPU creation to qemu_init_vcpu() */
    qemu_cond_signal(&qemu_cpu_cond);

    do {
        if (cpu_can_run(cpu)) {
            r = hax_smp_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }

        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));
    rcu_unregister_thread();
    return NULL;
}
1569
/* The HVF-specific vCPU thread function. This one should only run when the host
 * CPU supports the VMX "unrestricted guest" feature. */
static void *qemu_hvf_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    int r;

    assert(hvf_enabled());

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    hvf_init_vcpu(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    /* main loop: run guest code until the CPU is unplugged */
    do {
        if (cpu_can_run(cpu)) {
            r = hvf_vcpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    /* tear down the vCPU and announce its destruction */
    hvf_vcpu_destroy(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}
1612
/* WHPX (Windows Hypervisor Platform) vCPU thread function: one thread
 * per vCPU, looping until the CPU is unplugged and can no longer run.
 */
static void *qemu_whpx_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = whpx_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "whpx_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    do {
        if (cpu_can_run(cpu)) {
            r = whpx_vcpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        /* sleep on halt_cond while idle, then handle any pending events */
        while (cpu_thread_is_idle(cpu)) {
            qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
        }
        qemu_wait_io_event_common(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    whpx_destroy_vcpu(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}
1655
#ifdef _WIN32
/* No-op APC routine: queued via QueueUserAPC in qemu_cpu_kick_thread()
 * purely to wake the target vCPU thread out of an alertable wait.
 */
static void CALLBACK dummy_apc_func(ULONG_PTR unused)
{
}
#endif
1661
Alex Bennée37257942017-02-23 18:29:14 +00001662/* Multi-threaded TCG
1663 *
1664 * In the multi-threaded case each vCPU has its own thread. The TLS
1665 * variable current_cpu can be used deep in the code to find the
1666 * current CPUState for a given thread.
1667 */
1668
1669static void *qemu_tcg_cpu_thread_fn(void *arg)
1670{
1671 CPUState *cpu = arg;
1672
Emilio G. Cotaf28d0df2018-06-22 13:45:31 -04001673 assert(tcg_enabled());
Alex Bennéebf51c722017-03-30 18:32:29 +01001674 g_assert(!use_icount);
1675
Alex Bennée37257942017-02-23 18:29:14 +00001676 rcu_register_thread();
Emilio G. Cota3468b592017-07-19 18:57:58 -04001677 tcg_register_thread();
Alex Bennée37257942017-02-23 18:29:14 +00001678
1679 qemu_mutex_lock_iothread();
1680 qemu_thread_get_self(cpu->thread);
1681
1682 cpu->thread_id = qemu_get_thread_id();
1683 cpu->created = true;
1684 cpu->can_do_io = 1;
1685 current_cpu = cpu;
1686 qemu_cond_signal(&qemu_cpu_cond);
1687
1688 /* process any pending work */
1689 cpu->exit_request = 1;
1690
Cédric Le Goater54961aa2018-04-25 15:18:28 +02001691 do {
Alex Bennée37257942017-02-23 18:29:14 +00001692 if (cpu_can_run(cpu)) {
1693 int r;
Alex Bennéed759c952018-02-27 12:52:48 +03001694 qemu_mutex_unlock_iothread();
Alex Bennée37257942017-02-23 18:29:14 +00001695 r = tcg_cpu_exec(cpu);
Alex Bennéed759c952018-02-27 12:52:48 +03001696 qemu_mutex_lock_iothread();
Alex Bennée37257942017-02-23 18:29:14 +00001697 switch (r) {
1698 case EXCP_DEBUG:
1699 cpu_handle_guest_debug(cpu);
1700 break;
1701 case EXCP_HALTED:
1702 /* during start-up the vCPU is reset and the thread is
1703 * kicked several times. If we don't ensure we go back
1704 * to sleep in the halted state we won't cleanly
1705 * start-up when the vCPU is enabled.
1706 *
1707 * cpu->halted should ensure we sleep in wait_io_event
1708 */
1709 g_assert(cpu->halted);
1710 break;
Pranith Kumar08e73c42017-02-23 18:29:15 +00001711 case EXCP_ATOMIC:
1712 qemu_mutex_unlock_iothread();
1713 cpu_exec_step_atomic(cpu);
1714 qemu_mutex_lock_iothread();
Alex Bennée37257942017-02-23 18:29:14 +00001715 default:
1716 /* Ignore everything else? */
1717 break;
1718 }
1719 }
1720
Alex Bennée37257942017-02-23 18:29:14 +00001721 atomic_mb_set(&cpu->exit_request, 0);
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001722 qemu_wait_io_event(cpu);
Paolo Bonzini9b0605f2018-01-30 11:05:06 -05001723 } while (!cpu->unplug || cpu_can_run(cpu));
Alex Bennée37257942017-02-23 18:29:14 +00001724
Paolo Bonzini9b0605f2018-01-30 11:05:06 -05001725 qemu_tcg_destroy_vcpu(cpu);
1726 cpu->created = false;
1727 qemu_cond_signal(&qemu_cpu_cond);
1728 qemu_mutex_unlock_iothread();
1729 rcu_unregister_thread();
Alex Bennée37257942017-02-23 18:29:14 +00001730 return NULL;
1731}
1732
Andreas Färber2ff09a42012-05-03 00:23:30 +02001733static void qemu_cpu_kick_thread(CPUState *cpu)
Paolo Bonzinicc015e92011-03-12 17:44:08 +01001734{
1735#ifndef _WIN32
1736 int err;
1737
Paolo Bonzinie0c38212015-08-26 00:19:19 +02001738 if (cpu->thread_kicked) {
1739 return;
Paolo Bonzini9102ded2015-08-18 06:52:09 -07001740 }
Paolo Bonzinie0c38212015-08-26 00:19:19 +02001741 cpu->thread_kicked = true;
Andreas Färber814e6122012-05-02 17:00:37 +02001742 err = pthread_kill(cpu->thread->thread, SIG_IPI);
Paolo Bonzinicc015e92011-03-12 17:44:08 +01001743 if (err) {
1744 fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
1745 exit(1);
1746 }
1747#else /* _WIN32 */
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001748 if (!qemu_cpu_is_self(cpu)) {
Justin Terry (VM)19306802018-01-22 13:07:49 -08001749 if (whpx_enabled()) {
1750 whpx_vcpu_kick(cpu);
1751 } else if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) {
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001752 fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n",
1753 __func__, GetLastError());
1754 exit(1);
1755 }
1756 }
Paolo Bonzinicc015e92011-03-12 17:44:08 +01001757#endif
1758}
1759
/* Wake up @cpu: broadcast its halt condition and force it out of guest
 * execution.  TCG uses cpu_exit() (plus a kick of the shared thread
 * when running single-threaded RR); other accelerators signal the host
 * thread directly.
 */
void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (tcg_enabled()) {
        cpu_exit(cpu);
        /* NOP unless doing single-thread RR */
        qemu_cpu_kick_rr_cpu();
    } else {
        if (hax_enabled()) {
            /*
             * FIXME: race condition with the exit_request check in
             * hax_vcpu_hax_exec
             */
            cpu->exit_request = 1;
        }
        qemu_cpu_kick_thread(cpu);
    }
}
1778
/* Kick the vCPU running on the calling thread.  Must be called from a
 * vCPU thread (current_cpu set, asserted).
 */
void qemu_cpu_kick_self(void)
{
    assert(current_cpu);
    qemu_cpu_kick_thread(current_cpu);
}
1784
/* Return true if the calling thread is @cpu's vCPU thread. */
bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}
1789
/* Return true if the calling thread is a vCPU thread (current_cpu is
 * set and refers to this thread's CPU).
 */
bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}
1794
/* Per-thread flag tracking whether this thread holds the BQL. */
static __thread bool iothread_locked = false;

/* Return true if the calling thread currently holds the BQL. */
bool qemu_mutex_iothread_locked(void)
{
    return iothread_locked;
}
1801
/*
 * The BQL is taken from so many places that it is worth profiling the
 * callers directly, instead of funneling them all through a single function.
 */
void qemu_mutex_lock_iothread_impl(const char *file, int line)
{
    /* indirection lets a profiling hook replace the lock function */
    QemuMutexLockFunc bql_lock = atomic_read(&qemu_bql_mutex_lock_func);

    /* recursive BQL acquisition is a bug */
    g_assert(!qemu_mutex_iothread_locked());
    bql_lock(&qemu_global_mutex, file, line);
    iothread_locked = true;
}
1814
/* Release the BQL.  The calling thread must hold it (asserted). */
void qemu_mutex_unlock_iothread(void)
{
    g_assert(qemu_mutex_iothread_locked());
    /* clear the flag before unlocking, while still protected by the BQL */
    iothread_locked = false;
    qemu_mutex_unlock(&qemu_global_mutex);
}
1821
Alex Bennéee8faee02016-10-27 16:09:58 +01001822static bool all_vcpus_paused(void)
Blue Swirl296af7c2010-03-29 19:23:50 +00001823{
Andreas Färberbdc44642013-06-24 23:50:24 +02001824 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001825
Andreas Färberbdc44642013-06-24 23:50:24 +02001826 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001827 if (!cpu->stopped) {
Alex Bennéee8faee02016-10-27 16:09:58 +01001828 return false;
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001829 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001830 }
1831
Alex Bennéee8faee02016-10-27 16:09:58 +01001832 return true;
Blue Swirl296af7c2010-03-29 19:23:50 +00001833}
1834
/* Stop every vCPU and wait until they have all reached the stopped
 * state.  Called with the BQL held; the replay mutex is unlocked here,
 * so it is apparently held on entry as well — confirm against callers.
 */
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        if (qemu_cpu_is_self(cpu)) {
            /* pausing the CPU on our own thread: stop it synchronously */
            qemu_cpu_stop(cpu, true);
        } else {
            cpu->stop = true;
            qemu_cpu_kick(cpu);
        }
    }

    /* We need to drop the replay_lock so any vCPU threads woken up
     * can finish their replay tasks
     */
    replay_mutex_unlock();

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        /* re-kick any vCPU that has not stopped yet */
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }

    /* retake the replay lock with the BQL dropped, then relock the BQL */
    qemu_mutex_unlock_iothread();
    replay_mutex_lock();
    qemu_mutex_lock_iothread();
}
1865
/* Clear @cpu's stop/stopped flags and kick it back into execution. */
void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}
1872
/* Re-enable the virtual clock and resume every vCPU. */
void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}
1882
/* Unplug @cpu and synchronously wait for its thread to exit.  The BQL
 * is dropped around the join so the vCPU thread can take it during its
 * shutdown path.
 */
void cpu_remove_sync(CPUState *cpu)
{
    cpu->stop = true;
    cpu->unplug = true;
    qemu_cpu_kick(cpu);
    qemu_mutex_unlock_iothread();
    qemu_thread_join(cpu->thread);
    qemu_mutex_lock_iothread();
}
1892
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001893/* For temporary buffers for forming a name */
1894#define VCPU_THREAD_NAME_SIZE 16
1895
/* Set up the host thread(s) for a TCG vCPU.
 *
 * MTTCG: one thread per vCPU.  Single-threaded (RR): only the first
 * vCPU creates a thread; later vCPUs attach to it via the static
 * single_tcg_* locals.
 */
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];
    static QemuCond *single_tcg_halt_cond;
    static QemuThread *single_tcg_cpu_thread;
    static int tcg_region_inited;

    assert(tcg_enabled());
    /*
     * Initialize TCG regions--once. Now is a good time, because:
     * (1) TCG's init context, prologue and target globals have been set up.
     * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the
     *     -accel flag is processed, so the check doesn't work then).
     */
    if (!tcg_region_inited) {
        tcg_region_inited = 1;
        tcg_region_init();
    }

    if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);

        if (qemu_tcg_mttcg_enabled()) {
            /* create a thread per vCPU with TCG (MTTCG) */
            parallel_cpus = true;
            snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                 cpu->cpu_index);

            qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
                               cpu, QEMU_THREAD_JOINABLE);

        } else {
            /* share a single thread for all cpus with TCG */
            snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
            qemu_thread_create(cpu->thread, thread_name,
                               qemu_tcg_rr_cpu_thread_fn,
                               cpu, QEMU_THREAD_JOINABLE);

            /* remember the shared thread for subsequent vCPUs */
            single_tcg_halt_cond = cpu->halt_cond;
            single_tcg_cpu_thread = cpu->thread;
        }
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
    } else {
        /* For non-MTTCG cases we share the thread */
        cpu->thread = single_tcg_cpu_thread;
        cpu->halt_cond = single_tcg_halt_cond;
        cpu->thread_id = first_cpu->thread_id;
        cpu->can_do_io = 1;
        /* no new thread is started, so mark the vCPU created right away */
        cpu->created = true;
    }
}
1951
/* Create the per-vCPU host thread for the HAX accelerator. */
static void qemu_hax_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);

    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
    /* handle used by qemu_cpu_kick_thread()'s QueueUserAPC path */
    cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
}
1968
/* Create the per-vCPU host thread for the KVM accelerator. */
static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
}
1981
/* Create the per-vCPU host thread for the HVF accelerator. */
static void qemu_hvf_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    /* HVF currently does not support TCG, and only runs in
     * unrestricted-guest mode. */
    assert(hvf_enabled());

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);

    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
}
1999
/* Create the per-vCPU host thread for the WHPX accelerator. */
static void qemu_whpx_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_whpx_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
    /* handle used by qemu_cpu_kick_thread()'s QueueUserAPC path */
    cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
}
2015
/* Create the per-vCPU host thread for the dummy (no-accelerator) backend. */
static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
}
2028
/* Common vCPU initialisation: set topology fields, give the CPU a
 * default address space if it has none, start the accelerator-specific
 * vCPU thread, and block until that thread reports cpu->created.
 */
void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;

    if (!cpu->as) {
        /* If the target cpu hasn't set up any address spaces itself,
         * give it the default one.
         */
        cpu->num_ases = 1;
        cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
    }

    /* dispatch to the active accelerator's thread-start routine */
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (hax_enabled()) {
        qemu_hax_start_vcpu(cpu);
    } else if (hvf_enabled()) {
        qemu_hvf_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else if (whpx_enabled()) {
        qemu_whpx_start_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }

    /* wait for the vCPU thread to signal cpu->created on qemu_cpu_cond */
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
2061
/* Stop the vCPU running on the calling thread, if any. */
void cpu_stop_current(void)
{
    if (current_cpu) {
        qemu_cpu_stop(current_cpu, true);
    }
}
2068
/* Stop the VM, entering run state @state.  Returns 0 on success or a
 * negative value propagated from do_vm_stop()'s flush.
 */
int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        /* A vCPU thread cannot pause all vCPUs itself: queue the stop
         * request for the main loop and stop only the current CPU.
         */
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state, true);
}
2084
/**
 * Prepare for (re)starting the VM.
 * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
 * running or in case of an error condition), 0 otherwise.
 */
int vm_prepare_start(void)
{
    RunState requested;

    qemu_vmstop_requested(&requested);
    if (runstate_is_running() && requested == RUN_STATE__MAX) {
        /* already running and no stop request pending: nothing to do */
        return -1;
    }

    /* Ensure that a STOP/RESUME pair of events is emitted if a
     * vmstop request was pending.  The BLOCK_IO_ERROR event, for
     * example, according to documentation is always followed by
     * the STOP event.
     */
    if (runstate_is_running()) {
        qapi_event_send_stop(&error_abort);
        qapi_event_send_resume(&error_abort);
        return -1;
    }

    /* We are sending this now, but the CPUs will be resumed shortly later */
    qapi_event_send_resume(&error_abort);

    replay_enable_events();
    cpu_enable_ticks();
    runstate_set(RUN_STATE_RUNNING);
    vm_state_notify(1, RUN_STATE_RUNNING);
    return 0;
}
2119
/* Start the VM: when preparation succeeds, resume all vCPUs. */
void vm_start(void)
{
    if (vm_prepare_start() == 0) {
        resume_all_vcpus();
    }
}
2126
/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        /* already stopped: just record the new state and settle storage */
        runstate_set(state);

        bdrv_drain_all();
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}
2142
/* Print the list of CPU models supported by this target to @f via
 * @cpu_fprintf.  @optarg is currently unused in this function.
 * Targets without a cpu_list implementation print nothing. */
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002150
/*
 * QMP handler for "query-cpus": build a newly allocated list with one
 * CpuInfo element per vCPU (index, halted state, QOM path, thread id,
 * plus per-architecture register info and optional topology props).
 *
 * NOTE(review): this is the "slow" query — cpu_synchronize_state() below
 * presumably pulls register state from the accelerator and thus may
 * disturb running vCPU threads; qmp_query_cpus_fast() exists for callers
 * that must avoid that.  Caller owns the returned list.
 */
CpuInfoList *qmp_query_cpus(Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
        /* Per-target view of the CPU's architectural state; exactly one
         * branch is compiled into any given target binary. */
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_RISCV)
        RISCVCPU *riscv_cpu = RISCV_CPU(cpu);
        CPURISCVState *env = &riscv_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#elif defined(TARGET_TRICORE)
        TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
        CPUTriCoreState *env = &tricore_cpu->env;
#elif defined(TARGET_S390X)
        S390CPU *s390_cpu = S390_CPU(cpu);
        CPUS390XState *env = &s390_cpu->env;
#endif

        /* Make sure env reflects the current register values. */
        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
        info->value->thread_id = cpu->thread_id;
        /* Fill in the arch discriminator and the matching union member. */
#if defined(TARGET_I386)
        info->value->arch = CPU_INFO_ARCH_X86;
        info->value->u.x86.pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->arch = CPU_INFO_ARCH_PPC;
        info->value->u.ppc.nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->arch = CPU_INFO_ARCH_SPARC;
        info->value->u.q_sparc.pc = env->pc;
        info->value->u.q_sparc.npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->arch = CPU_INFO_ARCH_MIPS;
        info->value->u.q_mips.PC = env->active_tc.PC;
#elif defined(TARGET_TRICORE)
        info->value->arch = CPU_INFO_ARCH_TRICORE;
        info->value->u.tricore.PC = env->PC;
#elif defined(TARGET_S390X)
        info->value->arch = CPU_INFO_ARCH_S390;
        info->value->u.s390.cpu_state = env->cpu_state;
#elif defined(TARGET_RISCV)
        info->value->arch = CPU_INFO_ARCH_RISCV;
        info->value->u.riscv.pc = env->pc;
#else
        info->value->arch = CPU_INFO_ARCH_OTHER;
#endif
        /* Topology properties are optional; only machines that implement
         * cpu_index_to_instance_props() can provide them. */
        info->value->has_props = !!mc->cpu_index_to_instance_props;
        if (info->value->has_props) {
            CpuInstanceProperties *props;
            props = g_malloc0(sizeof(*props));
            *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
            info->value->props = props;
        }

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002236
Laszlo Ersekdaa9d2b2018-04-27 21:28:51 +02002237static CpuInfoArch sysemu_target_to_cpuinfo_arch(SysEmuTarget target)
2238{
2239 /*
2240 * The @SysEmuTarget -> @CpuInfoArch mapping below is based on the
2241 * TARGET_ARCH -> TARGET_BASE_ARCH mapping in the "configure" script.
2242 */
2243 switch (target) {
2244 case SYS_EMU_TARGET_I386:
2245 case SYS_EMU_TARGET_X86_64:
2246 return CPU_INFO_ARCH_X86;
2247
2248 case SYS_EMU_TARGET_PPC:
2249 case SYS_EMU_TARGET_PPCEMB:
2250 case SYS_EMU_TARGET_PPC64:
2251 return CPU_INFO_ARCH_PPC;
2252
2253 case SYS_EMU_TARGET_SPARC:
2254 case SYS_EMU_TARGET_SPARC64:
2255 return CPU_INFO_ARCH_SPARC;
2256
2257 case SYS_EMU_TARGET_MIPS:
2258 case SYS_EMU_TARGET_MIPSEL:
2259 case SYS_EMU_TARGET_MIPS64:
2260 case SYS_EMU_TARGET_MIPS64EL:
2261 return CPU_INFO_ARCH_MIPS;
2262
2263 case SYS_EMU_TARGET_TRICORE:
2264 return CPU_INFO_ARCH_TRICORE;
2265
2266 case SYS_EMU_TARGET_S390X:
2267 return CPU_INFO_ARCH_S390;
2268
2269 case SYS_EMU_TARGET_RISCV32:
2270 case SYS_EMU_TARGET_RISCV64:
2271 return CPU_INFO_ARCH_RISCV;
2272
2273 default:
2274 return CPU_INFO_ARCH_OTHER;
2275 }
2276}
2277
2278static void cpustate_to_cpuinfo_s390(CpuInfoS390 *info, const CPUState *cpu)
2279{
2280#ifdef TARGET_S390X
2281 S390CPU *s390_cpu = S390_CPU(cpu);
2282 CPUS390XState *env = &s390_cpu->env;
2283
2284 info->cpu_state = env->cpu_state;
2285#else
2286 abort();
2287#endif
2288}
2289
Luiz Capitulinoce74ee32018-02-16 17:08:38 +01002290/*
2291 * fast means: we NEVER interrupt vCPU threads to retrieve
2292 * information from KVM.
2293 */
2294CpuInfoFastList *qmp_query_cpus_fast(Error **errp)
2295{
2296 MachineState *ms = MACHINE(qdev_get_machine());
2297 MachineClass *mc = MACHINE_GET_CLASS(ms);
2298 CpuInfoFastList *head = NULL, *cur_item = NULL;
Laszlo Ersekdaa9d2b2018-04-27 21:28:51 +02002299 SysEmuTarget target = qapi_enum_parse(&SysEmuTarget_lookup, TARGET_NAME,
2300 -1, &error_abort);
Luiz Capitulinoce74ee32018-02-16 17:08:38 +01002301 CPUState *cpu;
2302
2303 CPU_FOREACH(cpu) {
2304 CpuInfoFastList *info = g_malloc0(sizeof(*info));
2305 info->value = g_malloc0(sizeof(*info->value));
2306
2307 info->value->cpu_index = cpu->cpu_index;
2308 info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
2309 info->value->thread_id = cpu->thread_id;
2310
2311 info->value->has_props = !!mc->cpu_index_to_instance_props;
2312 if (info->value->has_props) {
2313 CpuInstanceProperties *props;
2314 props = g_malloc0(sizeof(*props));
2315 *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
2316 info->value->props = props;
2317 }
2318
Laszlo Ersekdaa9d2b2018-04-27 21:28:51 +02002319 info->value->arch = sysemu_target_to_cpuinfo_arch(target);
2320 info->value->target = target;
2321 if (target == SYS_EMU_TARGET_S390X) {
2322 cpustate_to_cpuinfo_s390(&info->value->u.s390x, cpu);
Laszlo Ersekdaa9d2b2018-04-27 21:28:51 +02002323 }
2324
Luiz Capitulinoce74ee32018-02-16 17:08:38 +01002325 if (!cur_item) {
2326 head = cur_item = info;
2327 } else {
2328 cur_item->next = info;
2329 cur_item = info;
2330 }
2331 }
2332
2333 return head;
2334}
2335
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002336void qmp_memsave(int64_t addr, int64_t size, const char *filename,
2337 bool has_cpu, int64_t cpu_index, Error **errp)
2338{
2339 FILE *f;
2340 uint32_t l;
Andreas Färber55e5c282012-12-17 06:18:02 +01002341 CPUState *cpu;
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002342 uint8_t buf[1024];
Borislav Petkov0dc9daf2015-02-08 13:14:38 +01002343 int64_t orig_addr = addr, orig_size = size;
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002344
2345 if (!has_cpu) {
2346 cpu_index = 0;
2347 }
2348
Andreas Färber151d1322013-02-15 15:41:49 +01002349 cpu = qemu_get_cpu(cpu_index);
2350 if (cpu == NULL) {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +01002351 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
2352 "a CPU number");
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002353 return;
2354 }
2355
2356 f = fopen(filename, "wb");
2357 if (!f) {
Luiz Capitulino618da852013-06-07 14:35:06 -04002358 error_setg_file_open(errp, errno, filename);
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002359 return;
2360 }
2361
2362 while (size != 0) {
2363 l = sizeof(buf);
2364 if (l > size)
2365 l = size;
Aneesh Kumar K.V2f4d0f52013-10-01 21:49:30 +05302366 if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
Borislav Petkov0dc9daf2015-02-08 13:14:38 +01002367 error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
2368 " specified", orig_addr, orig_size);
Aneesh Kumar K.V2f4d0f52013-10-01 21:49:30 +05302369 goto exit;
2370 }
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002371 if (fwrite(buf, 1, l, f) != l) {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +01002372 error_setg(errp, QERR_IO_ERROR);
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002373 goto exit;
2374 }
2375 addr += l;
2376 size -= l;
2377 }
2378
2379exit:
2380 fclose(f);
2381}
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02002382
2383void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
2384 Error **errp)
2385{
2386 FILE *f;
2387 uint32_t l;
2388 uint8_t buf[1024];
2389
2390 f = fopen(filename, "wb");
2391 if (!f) {
Luiz Capitulino618da852013-06-07 14:35:06 -04002392 error_setg_file_open(errp, errno, filename);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02002393 return;
2394 }
2395
2396 while (size != 0) {
2397 l = sizeof(buf);
2398 if (l > size)
2399 l = size;
Stefan Weileb6282f2014-04-07 20:28:23 +02002400 cpu_physical_memory_read(addr, buf, l);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02002401 if (fwrite(buf, 1, l, f) != l) {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +01002402 error_setg(errp, QERR_IO_ERROR);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02002403 goto exit;
2404 }
2405 addr += l;
2406 size -= l;
2407 }
2408
2409exit:
2410 fclose(f);
2411}
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02002412
/* QMP handler for "inject-nmi": delegate to nmi_monitor_handle() with the
 * current monitor CPU index; errors are reported through @errp. */
void qmp_inject_nmi(Error **errp)
{
    nmi_monitor_handle(monitor_get_cpu_index(), errp);
}
Sebastian Tanase27498be2014-07-25 11:56:33 +02002417
2418void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
2419{
2420 if (!use_icount) {
2421 return;
2422 }
2423
2424 cpu_fprintf(f, "Host - Guest clock %"PRIi64" ms\n",
2425 (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
2426 if (icount_align_option) {
2427 cpu_fprintf(f, "Max guest delay %"PRIi64" ms\n", -max_delay/SCALE_MS);
2428 cpu_fprintf(f, "Max guest advance %"PRIi64" ms\n", max_advance/SCALE_MS);
2429 } else {
2430 cpu_fprintf(f, "Max guest delay NA\n");
2431 cpu_fprintf(f, "Max guest advance NA\n");
2432 }
2433}