/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/config-file.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm.h"
#include "sysemu/hax.h"
#include "sysemu/hvf.h"
#include "sysemu/whpx.h"
#include "exec/exec-all.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
#include "tcg.h"
#include "hw/nmi.h"
#include "sysemu/replay.h"
#include "hw/boards.h"

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

int64_t max_delay;
int64_t max_advance;

/* vcpu throttling controls */
static QEMUTimer *throttle_timer;
static unsigned int throttle_percentage;

#define CPU_THROTTLE_PCT_MIN 1
#define CPU_THROTTLE_PCT_MAX 99
#define CPU_THROTTLE_TIMESLICE_NS 10000000

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

static bool icount_sleep = true;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10

typedef struct TimersState {
    /* Protected by BQL. */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* Protect fields that can be respectively read outside the
     * BQL, and written from multiple threads.
     */
    QemuSeqLock vm_clock_seqlock;
    QemuSpin vm_clock_lock;

    int16_t cpu_ticks_enabled;

    /* Conversion factor from emulated instructions to virtual clock ticks. */
    int16_t icount_time_shift;

    /* Compensate for varying guest execution speed. */
    int64_t qemu_icount_bias;

    int64_t vm_clock_warp_start;
    int64_t cpu_clock_offset;

    /* Only written by TCG thread */
    int64_t qemu_icount;

    /* for adjusting icount */
    QEMUTimer *icount_rt_timer;
    QEMUTimer *icount_vm_timer;
    QEMUTimer *icount_warp_timer;
} TimersState;

static TimersState timers_state;
bool mttcg_enabled;

/*
 * We default to false if we know other options have been enabled
 * which are currently incompatible with MTTCG. Otherwise when each
 * guest (target) has been updated to support:
 *   - atomic instructions
 *   - memory ordering primitives (barriers)
 * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
 *
 * Once a guest architecture has been converted to the new primitives
 * there are two remaining limitations to check.
 *
 * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
 * - The host must have a stronger memory order than the guest
 *
 * It may be possible in future to support strong guests on weak hosts
 * but that will require tagging all load/stores in a guest with their
 * implicit memory order requirements which would likely slow things
 * down a lot.
 */

static bool check_tcg_memory_orders_compatible(void)
{
#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
    return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
#else
    return false;
#endif
}
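
/* An illustrative reading of the subset test above (the constants are
 * hypothetical, not taken from any real target): with
 * TCG_GUEST_DEFAULT_MO == 0x3 and TCG_TARGET_DEFAULT_MO == 0x7, every
 * ordering bit the guest requires is provided by the host backend and
 * 0x3 & ~0x7 == 0, so the pair is compatible; any guest bit missing
 * from the host mask would survive the & ~ and make the check fail.
 */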

static bool default_mttcg_enabled(void)
{
    if (use_icount || TCG_OVERSIZED_GUEST) {
        return false;
    } else {
#ifdef TARGET_SUPPORTS_MTTCG
        return check_tcg_memory_orders_compatible();
#else
        return false;
#endif
    }
}

void qemu_tcg_configure(QemuOpts *opts, Error **errp)
{
    const char *t = qemu_opt_get(opts, "thread");
    if (t) {
        if (strcmp(t, "multi") == 0) {
            if (TCG_OVERSIZED_GUEST) {
                error_setg(errp, "No MTTCG when guest word size > hosts");
            } else if (use_icount) {
                error_setg(errp, "No MTTCG when icount is enabled");
            } else {
#ifndef TARGET_SUPPORTS_MTTCG
                error_report("Guest not yet converted to MTTCG - "
                             "you may get unexpected results");
#endif
                if (!check_tcg_memory_orders_compatible()) {
                    error_report("Guest expects a stronger memory ordering "
                                 "than the host provides");
                    error_printf("This may cause strange/hard to debug errors\n");
                }
                mttcg_enabled = true;
            }
        } else if (strcmp(t, "single") == 0) {
            mttcg_enabled = false;
        } else {
            error_setg(errp, "Invalid 'thread' setting %s", t);
        }
    } else {
        mttcg_enabled = default_mttcg_enabled();
    }
}

/* The current number of executed instructions is based on what we
 * originally budgeted minus the current state of the decrementing
 * icount counters in extra/u16.low.
 */
static int64_t cpu_get_icount_executed(CPUState *cpu)
{
    return cpu->icount_budget - (cpu->icount_decr.u16.low + cpu->icount_extra);
}
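
/* Worked example for the budget arithmetic above (numbers are
 * illustrative only): with an icount_budget of 10000 instructions,
 * icount_decr.u16.low still holding 200 and icount_extra holding 1800,
 * 10000 - (200 + 1800) = 8000 instructions have been executed so far.
 */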

/*
 * Update the global shared timer_state.qemu_icount to take into
 * account executed instructions. This is done by the TCG vCPU
 * thread so the main-loop can see time has moved forward.
 */
static void cpu_update_icount_locked(CPUState *cpu)
{
    int64_t executed = cpu_get_icount_executed(cpu);
    cpu->icount_budget -= executed;

    atomic_set_i64(&timers_state.qemu_icount,
                   timers_state.qemu_icount + executed);
}

/*
 * Update the global shared timer_state.qemu_icount to take into
 * account executed instructions. This is done by the TCG vCPU
 * thread so the main-loop can see time has moved forward.
 */
void cpu_update_icount(CPUState *cpu)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    cpu_update_icount_locked(cpu);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

static int64_t cpu_get_icount_raw_locked(void)
{
    CPUState *cpu = current_cpu;

    if (cpu && cpu->running) {
        if (!cpu->can_do_io) {
            error_report("Bad icount read");
            exit(1);
        }
        /* Take into account what has run */
        cpu_update_icount_locked(cpu);
    }
    /* The read is protected by the seqlock, but needs atomic64 to avoid UB */
    return atomic_read_i64(&timers_state.qemu_icount);
}

static int64_t cpu_get_icount_locked(void)
{
    int64_t icount = cpu_get_icount_raw_locked();
    return atomic_read__nocheck(&timers_state.qemu_icount_bias) + cpu_icount_to_ns(icount);
}

int64_t cpu_get_icount_raw(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_raw_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}

/* Return the virtual CPU time, based on the instruction counter. */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}

int64_t cpu_icount_to_ns(int64_t icount)
{
    return icount << atomic_read(&timers_state.icount_time_shift);
}
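
/* icount_time_shift is a power-of-two cost per instruction: with a
 * shift of 3 each instruction accounts for 2^3 = 8 ns of virtual time,
 * i.e. a nominal 10^9 / 8 = 125 MIPS, which is the initial guess
 * installed by configure_icount() below.
 */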

static int64_t cpu_get_ticks_locked(void)
{
    int64_t ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_host_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Non-increasing ticks may happen if the host uses software suspend. */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}

/* return the time elapsed in VM between vm_start and vm_stop.  Unless
 * icount is active, cpu_get_ticks() uses units of the host CPU cycle
 * counter.
 */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    qemu_spin_lock(&timers_state.vm_clock_lock);
    ticks = cpu_get_ticks_locked();
    qemu_spin_unlock(&timers_state.vm_clock_lock);
    return ticks;
}

static int64_t cpu_get_clock_locked(void)
{
    int64_t time;

    time = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        time += get_clock();
    }

    return time;
}

/* Return the monotonic time elapsed in VM, i.e.,
 * the time between vm_start and vm_stop
 */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_host_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex. */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing. */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && timers_state.icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down. */
        atomic_set(&timers_state.icount_time_shift,
                   timers_state.icount_time_shift - 1);
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && timers_state.icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up. */
        atomic_set(&timers_state.icount_time_shift,
                   timers_state.icount_time_shift + 1);
    }
    last_delta = delta;
    atomic_set__nocheck(&timers_state.qemu_icount_bias,
                        cur_icount - (timers_state.qemu_icount
                                      << timers_state.icount_time_shift));
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}
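
/* Note on the bias update at the end of icount_adjust():
 * cpu_get_icount_locked() returns qemu_icount_bias +
 * (qemu_icount << icount_time_shift), so recomputing the bias as
 * cur_icount - (qemu_icount << shift) keeps the returned virtual time
 * continuous at exactly cur_icount after a shift change; only the
 * future rate changes, the clock never jumps.
 */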

static void icount_adjust_rt(void *opaque)
{
    timer_mod(timers_state.icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(timers_state.icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    int shift = atomic_read(&timers_state.icount_time_shift);
    return (count + (1 << shift) - 1) >> shift;
}
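
/* qemu_icount_round() is a ceiling division by 2^shift: a 1000 ns
 * budget with shift == 3 becomes (1000 + 7) >> 3 = 125 instructions,
 * so the instruction budget always covers the full deadline.
 */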

static void icount_warp_rt(void)
{
    unsigned seq;
    int64_t warp_start;

    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    do {
        seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        warp_start = timers_state.vm_clock_warp_start;
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));

    if (warp_start == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (runstate_is_running()) {
        int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT,
                                     cpu_get_clock_locked());
        int64_t warp_delta;

        warp_delta = clock - timers_state.vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = clock - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        atomic_set__nocheck(&timers_state.qemu_icount_bias,
                            timers_state.qemu_icount_bias + warp_delta);
    }
    timers_state.vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static void icount_timer_cb(void *opaque)
{
    /* No need for a checkpoint because the timer already synchronizes
     * with CHECKPOINT_CLOCK_VIRTUAL_RT.
     */
    icount_warp_rt();
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    AioContext *aio_context;
    assert(qtest_enabled());
    aio_context = qemu_get_aio_context();
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);

        seqlock_write_lock(&timers_state.vm_clock_seqlock,
                           &timers_state.vm_clock_lock);
        atomic_set__nocheck(&timers_state.qemu_icount_bias,
                            timers_state.qemu_icount_bias + warp);
        seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                             &timers_state.vm_clock_lock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
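
/* This path is only reachable under the qtest accelerator.  As an
 * illustration (assuming the qtest protocol's "clock_step" command,
 * which lives outside this file), a test stepping the clock by 1 ms
 * would land here with dest = clock + 1000000, and the loop fires
 * every QEMU_CLOCK_VIRTUAL timer that falls inside that window.
 */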

void qemu_start_warp_timer(void)
{
    int64_t clock;
    int64_t deadline;

    if (!use_icount) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    /* warp clock deterministically in record/replay mode */
    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
        return;
    }

    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount. */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        static bool notified;
        if (!icount_sleep && !notified) {
            warn_report("icount sleep disabled and no active timers");
            notified = true;
        }
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         */
        if (!icount_sleep) {
            /*
             * We never let VCPUs sleep in no sleep icount mode.
             * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
             * to the next QEMU_CLOCK_VIRTUAL event and notify it.
             * It is useful when we want a deterministic execution time,
             * isolated from host latencies.
             */
            seqlock_write_lock(&timers_state.vm_clock_seqlock,
                               &timers_state.vm_clock_lock);
            atomic_set__nocheck(&timers_state.qemu_icount_bias,
                                timers_state.qemu_icount_bias + deadline);
            seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                                 &timers_state.vm_clock_lock);
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
        } else {
            /*
             * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
             * "real" time, (related to the time left until the next event) has
             * passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
             * This avoids that the warps are visible externally; for example,
             * you will not be sending network packets continuously instead of
             * every 100ms.
             */
            seqlock_write_lock(&timers_state.vm_clock_seqlock,
                               &timers_state.vm_clock_lock);
            if (timers_state.vm_clock_warp_start == -1
                || timers_state.vm_clock_warp_start > clock) {
                timers_state.vm_clock_warp_start = clock;
            }
            seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                                 &timers_state.vm_clock_lock);
            timer_mod_anticipate(timers_state.icount_warp_timer,
                                 clock + deadline);
        }
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static void qemu_account_warp_timer(void)
{
    if (!use_icount || !icount_sleep) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    /* warp clock deterministically in record/replay mode */
    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
        return;
    }

    timer_del(timers_state.icount_warp_timer);
    icount_warp_rt();
}

static bool icount_state_needed(void *opaque)
{
    return use_icount;
}

static bool warp_timer_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_warp_timer != NULL;
}

static bool adjust_timers_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_rt_timer != NULL;
}

/*
 * Subsection for warp timer migration is optional, because it may not
 * be created.
 */
static const VMStateDescription icount_vmstate_warp_timer = {
    .name = "timer/icount/warp_timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = warp_timer_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(vm_clock_warp_start, TimersState),
        VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription icount_vmstate_adjust_timers = {
    .name = "timer/icount/timers",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = adjust_timers_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
        VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icount_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_warp_timer,
        &icount_vmstate_adjust_timers,
        NULL
    }
};

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_UNUSED(8),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_timers,
        NULL
    }
};

static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
{
    double pct;
    double throttle_ratio;
    long sleeptime_ns;

    if (!cpu_throttle_get_percentage()) {
        return;
    }

    pct = (double)cpu_throttle_get_percentage()/100;
    throttle_ratio = pct / (1 - pct);
    sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS);

    qemu_mutex_unlock_iothread();
    g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */
    qemu_mutex_lock_iothread();
    atomic_set(&cpu->throttle_thread_scheduled, 0);
}
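
/* Worked example for the sleep arithmetic above: at 50% throttling,
 * pct = 0.5 and throttle_ratio = 0.5 / 0.5 = 1, so the vCPU sleeps
 * 1 * CPU_THROTTLE_TIMESLICE_NS = 10 ms per timeslice.  At the 99%
 * maximum, throttle_ratio = 0.99 / 0.01 = 99 and the sleep grows to
 * 990 ms per timeslice, leaving the vCPU about 1% of wall-clock time.
 */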

static void cpu_throttle_timer_tick(void *opaque)
{
    CPUState *cpu;
    double pct;

    /* Stop the timer if needed */
    if (!cpu_throttle_get_percentage()) {
        return;
    }
    CPU_FOREACH(cpu) {
        if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
            async_run_on_cpu(cpu, cpu_throttle_thread,
                             RUN_ON_CPU_NULL);
        }
    }

    pct = (double)cpu_throttle_get_percentage()/100;
    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                                   CPU_THROTTLE_TIMESLICE_NS / (1-pct));
}
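
/* The timer period above stretches with the throttle percentage so
 * that the running part of each period stays close to one timeslice:
 * at pct = 0.5 the timer refires after 10 ms / 0.5 = 20 ms (10 ms of
 * execution plus 10 ms of sleep), at pct = 0.9 after 100 ms (10 + 90).
 */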

void cpu_throttle_set(int new_throttle_pct)
{
    /* Ensure throttle percentage is within valid range */
    new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
    new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);

    atomic_set(&throttle_percentage, new_throttle_pct);

    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                                       CPU_THROTTLE_TIMESLICE_NS);
}

void cpu_throttle_stop(void)
{
    atomic_set(&throttle_percentage, 0);
}

bool cpu_throttle_active(void)
{
    return (cpu_throttle_get_percentage() != 0);
}

int cpu_throttle_get_percentage(void)
{
    return atomic_read(&throttle_percentage);
}

void cpu_ticks_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock);
    qemu_spin_init(&timers_state.vm_clock_lock);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                  cpu_throttle_timer_tick, NULL);
}

void configure_icount(QemuOpts *opts, Error **errp)
{
    const char *option;
    char *rem_str = NULL;

    option = qemu_opt_get(opts, "shift");
    if (!option) {
        if (qemu_opt_get(opts, "align") != NULL) {
            error_setg(errp, "Please specify shift option when using align");
        }
        return;
    }

    icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
    if (icount_sleep) {
        timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                         icount_timer_cb, NULL);
    }

    icount_align_option = qemu_opt_get_bool(opts, "align", false);

    if (icount_align_option && !icount_sleep) {
        error_setg(errp, "align=on and sleep=off are incompatible");
    }
    if (strcmp(option, "auto") != 0) {
        errno = 0;
        timers_state.icount_time_shift = strtol(option, &rem_str, 0);
        if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
            error_setg(errp, "icount: Invalid shift value");
        }
        use_icount = 1;
        return;
    } else if (icount_align_option) {
        error_setg(errp, "shift=auto and align=on are incompatible");
    } else if (!icount_sleep) {
        error_setg(errp, "shift=auto and sleep=off are incompatible");
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway. */
    timers_state.icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers. */
    timers_state.vm_clock_warp_start = -1;
    timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                                   icount_adjust_rt, NULL);
    timer_mod(timers_state.icount_rt_timer,
                   qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                        icount_adjust_vm, NULL);
    timer_mod(timers_state.icount_vm_timer,
                   qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   NANOSECONDS_PER_SECOND / 10);
}
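
/* Typical command lines exercising the options parsed above ("shift",
 * "align" and "sleep"; the exact CLI spelling may vary between QEMU
 * versions):
 *
 *   qemu-system-x86_64 -icount shift=7              # fixed 2^7 ns/insn
 *   qemu-system-x86_64 -icount shift=auto,sleep=off # adaptive, no sleep
 *
 * shift=auto selects the self-adjusting mode (use_icount == 2) driven
 * by the two icount_adjust timers installed above.
 */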

/***********************************************************/
/* TCG vCPU kick timer
 *
 * The kick timer is responsible for moving single threaded vCPU
 * emulation on to the next vCPU. If more than one vCPU is running a
 * timer event will force a cpu->exit so the next vCPU can get
 * scheduled.
 *
 * The timer is removed if all vCPUs are idle and restarted again once
 * idleness is complete.
 */

static QEMUTimer *tcg_kick_vcpu_timer;
static CPUState *tcg_current_rr_cpu;

#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)

static inline int64_t qemu_tcg_next_kick(void)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
}

/* Kick the currently round-robin scheduled vCPU */
static void qemu_cpu_kick_rr_cpu(void)
{
    CPUState *cpu;
    do {
        cpu = atomic_mb_read(&tcg_current_rr_cpu);
        if (cpu) {
            cpu_exit(cpu);
        }
    } while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
}
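
/* The re-read in the loop above closes a race with the round-robin
 * scheduler: if tcg_current_rr_cpu is switched to another vCPU between
 * the atomic_mb_read() and the cpu_exit(), the kick may have landed on
 * a vCPU that is no longer current, so we retry until the value is
 * stable across one full iteration.
 */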

static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
{
}

void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
{
    if (!use_icount || type != QEMU_CLOCK_VIRTUAL) {
        qemu_notify_event();
        return;
    }

    if (qemu_in_vcpu_thread()) {
        /* A CPU is currently running; kick it back out to the
         * tcg_cpu_exec() loop so it will recalculate its
         * icount deadline immediately.
         */
        qemu_cpu_kick(current_cpu);
    } else if (first_cpu) {
        /* qemu_cpu_kick is not enough to kick a halted CPU out of
         * qemu_tcg_wait_io_event.  async_run_on_cpu, instead,
         * causes cpu_thread_is_idle to return false.  This way,
         * handle_icount_deadline can run.
         * If we have no CPUs at all for some reason, we don't
         * need to do anything.
         */
        async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
    }
}

static void kick_tcg_thread(void *opaque)
{
    timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    qemu_cpu_kick_rr_cpu();
}

static void start_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);
    if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
        tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                           kick_tcg_thread, NULL);
        timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    }
}

static void stop_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);
    if (tcg_kick_vcpu_timer) {
        timer_del(tcg_kick_vcpu_timer);
        tcg_kick_vcpu_timer = NULL;
    }
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
        /* TODO: move to cpu_synchronize_state() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_state(cpu);
        }
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
        /* TODO: move to cpu_synchronize_post_reset() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_post_reset(cpu);
        }
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
        /* TODO: move to cpu_synchronize_post_init() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_post_init(cpu);
        }
    }
}

void cpu_synchronize_all_pre_loadvm(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_pre_loadvm(cpu);
    }
}

static int do_vm_stop(RunState state, bool send_stop)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        if (send_stop) {
            qapi_event_send_stop();
        }
    }

    bdrv_drain_all();
    replay_disable_events();
    ret = bdrv_flush_all();

    return ret;
}

/* Special vm_stop() variant for terminating the process.  Historically clients
 * did not expect a QMP STOP event and so we need to retain compatibility.
 */
int vm_shutdown(void)
{
    return do_vm_stop(RUN_STATE_SHUTDOWN, false);
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx)
{
    if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) {
        sigbus_reraise();
    }

    if (current_cpu) {
        /* Called asynchronously in VCPU thread. */
        if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    } else {
        /* Called synchronously (via signalfd) in main thread. */
        if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
#else /* !CONFIG_LINUX */
static void qemu_init_sigbus(void)
{
}
#endif /* !CONFIG_LINUX */

static QemuMutex qemu_global_mutex;

static QemuThread io_thread;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
}

static void qemu_kvm_destroy_vcpu(CPUState *cpu)
{
    if (kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}

static void qemu_tcg_destroy_vcpu(CPUState *cpu)
{
}

static void qemu_cpu_stop(CPUState *cpu, bool exit)
{
    g_assert(qemu_cpu_is_self(cpu));
    cpu->stop = false;
    cpu->stopped = true;
    if (exit) {
        cpu_exit(cpu);
    }
    qemu_cond_broadcast(&qemu_pause_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    atomic_mb_set(&cpu->thread_kicked, false);
    if (cpu->stop) {
        qemu_cpu_stop(cpu, false);
    }
    process_queued_cpu_work(cpu);
}

static void qemu_tcg_rr_wait_io_event(CPUState *cpu)
{
    while (all_cpu_threads_idle()) {
        stop_tcg_kick_timer();
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    start_tcg_kick_timer();

    qemu_wait_io_event_common(cpu);
}

static void qemu_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

#ifdef _WIN32
    /* Eat dummy APC queued by qemu_cpu_kick_thread. */
    if (!tcg_enabled()) {
        SleepEx(0, TRUE);
    }
#endif
    qemu_wait_io_event_common(cpu);
}

Jan Kiszka7e97cd82011-02-07 12:19:12 +01001237static void *qemu_kvm_cpu_thread_fn(void *arg)
Blue Swirl296af7c2010-03-29 19:23:50 +00001238{
Andreas Färber48a106b2013-05-27 02:20:39 +02001239 CPUState *cpu = arg;
Jan Kiszka84b49152011-02-01 22:15:50 +01001240 int r;
Blue Swirl296af7c2010-03-29 19:23:50 +00001241
Paolo Bonziniab28bd22015-07-09 08:55:38 +02001242 rcu_register_thread();
1243
Paolo Bonzini2e7f7a32015-06-18 18:47:18 +02001244 qemu_mutex_lock_iothread();
Andreas Färber814e6122012-05-02 17:00:37 +02001245 qemu_thread_get_self(cpu->thread);
Andreas Färber9f09e182012-05-03 06:59:07 +02001246 cpu->thread_id = qemu_get_thread_id();
Pavel Dovgalyuk626cf8f2014-12-08 10:53:17 +03001247 cpu->can_do_io = 1;
Andreas Färber4917cf42013-05-27 05:17:50 +02001248 current_cpu = cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001249
Andreas Färber504134d2012-12-17 06:38:45 +01001250 r = kvm_init_vcpu(cpu);
Jan Kiszka84b49152011-02-01 22:15:50 +01001251 if (r < 0) {
Alistair Francis493d89b2018-02-03 09:43:14 +01001252 error_report("kvm_init_vcpu failed: %s", strerror(-r));
Jan Kiszka84b49152011-02-01 22:15:50 +01001253 exit(1);
1254 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001255
Paolo Bonzini18268b62017-02-09 09:41:14 +01001256 kvm_init_cpu_signals(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001257
1258 /* signal CPU creation */
Andreas Färber61a46212012-05-02 22:49:36 +02001259 cpu->created = true;
Blue Swirl296af7c2010-03-29 19:23:50 +00001260 qemu_cond_signal(&qemu_cpu_cond);
1261
Gu Zheng4c055ab2016-05-12 09:18:13 +05301262 do {
Andreas Färbera1fcaa72012-05-02 23:42:26 +02001263 if (cpu_can_run(cpu)) {
Andreas Färber1458c362013-05-26 23:46:55 +02001264 r = kvm_cpu_exec(cpu);
Jan Kiszka83f338f2011-02-07 12:19:17 +01001265 if (r == EXCP_DEBUG) {
Andreas Färber91325042013-05-27 02:07:49 +02001266 cpu_handle_guest_debug(cpu);
Jan Kiszka83f338f2011-02-07 12:19:17 +01001267 }
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001268 }
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001269 qemu_wait_io_event(cpu);
Gu Zheng4c055ab2016-05-12 09:18:13 +05301270 } while (!cpu->unplug || cpu_can_run(cpu));
Blue Swirl296af7c2010-03-29 19:23:50 +00001271
Gu Zheng4c055ab2016-05-12 09:18:13 +05301272 qemu_kvm_destroy_vcpu(cpu);
Bharata B Rao2c579042016-05-12 09:18:14 +05301273 cpu->created = false;
1274 qemu_cond_signal(&qemu_cpu_cond);
Gu Zheng4c055ab2016-05-12 09:18:13 +05301275 qemu_mutex_unlock_iothread();
Paolo Bonzini57615ed2018-01-30 11:04:36 -05001276 rcu_unregister_thread();
Blue Swirl296af7c2010-03-29 19:23:50 +00001277 return NULL;
1278}
1279
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001280static void *qemu_dummy_cpu_thread_fn(void *arg)
1281{
1282#ifdef _WIN32
Alistair Francis493d89b2018-02-03 09:43:14 +01001283 error_report("qtest is not supported under Windows");
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001284 exit(1);
1285#else
Andreas Färber10a90212013-05-27 02:24:35 +02001286 CPUState *cpu = arg;
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001287 sigset_t waitset;
1288 int r;
1289
Paolo Bonziniab28bd22015-07-09 08:55:38 +02001290 rcu_register_thread();
1291
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001292 qemu_mutex_lock_iothread();
Andreas Färber814e6122012-05-02 17:00:37 +02001293 qemu_thread_get_self(cpu->thread);
Andreas Färber9f09e182012-05-03 06:59:07 +02001294 cpu->thread_id = qemu_get_thread_id();
Pavel Dovgalyuk626cf8f2014-12-08 10:53:17 +03001295 cpu->can_do_io = 1;
Alex Bennée37257942017-02-23 18:29:14 +00001296 current_cpu = cpu;
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001297
1298 sigemptyset(&waitset);
1299 sigaddset(&waitset, SIG_IPI);
1300
1301 /* signal CPU creation */
Andreas Färber61a46212012-05-02 22:49:36 +02001302 cpu->created = true;
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001303 qemu_cond_signal(&qemu_cpu_cond);
1304
Paolo Bonzinid2831ab2018-01-30 11:04:53 -05001305 do {
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001306 qemu_mutex_unlock_iothread();
1307 do {
1308 int sig;
1309 r = sigwait(&waitset, &sig);
1310 } while (r == -1 && (errno == EAGAIN || errno == EINTR));
1311 if (r == -1) {
1312 perror("sigwait");
1313 exit(1);
1314 }
1315 qemu_mutex_lock_iothread();
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001316 qemu_wait_io_event(cpu);
Paolo Bonzinid2831ab2018-01-30 11:04:53 -05001317 } while (!cpu->unplug);
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001318
Paolo Bonzinid2831ab2018-01-30 11:04:53 -05001319 rcu_unregister_thread();
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001320 return NULL;
1321#endif
1322}
1323
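/* Return how many instructions the vCPU may execute before the next
 * QEMU_CLOCK_VIRTUAL deadline; in replay mode the budget comes from the
 * recorded instruction stream instead.
 */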
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001324static int64_t tcg_get_icount_limit(void)
1325{
1326 int64_t deadline;
1327
1328 if (replay_mode != REPLAY_MODE_PLAY) {
1329 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1330
1331        /* Maintain the prior (possibly buggy) behaviour: if no deadline was
1332         * set (because there is no QEMU_CLOCK_VIRTUAL timer) or the deadline
1333         * is more than INT32_MAX nanoseconds ahead, cap it at INT32_MAX
1334         * nanoseconds.
1335         */
1336 if ((deadline < 0) || (deadline > INT32_MAX)) {
1337 deadline = INT32_MAX;
1338 }
1339
1340 return qemu_icount_round(deadline);
1341 } else {
1342 return replay_get_instructions();
1343 }
1344}
1345
Alex Bennée12e97002016-10-27 16:10:14 +01001346static void handle_icount_deadline(void)
1347{
Paolo Bonzini6b8f0182017-03-02 19:56:40 +01001348 assert(qemu_in_vcpu_thread());
Alex Bennée12e97002016-10-27 16:10:14 +01001349 if (use_icount) {
1350 int64_t deadline =
1351 qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1352
1353 if (deadline == 0) {
Paolo Bonzini6b8f0182017-03-02 19:56:40 +01001354 /* Wake up other AioContexts. */
Alex Bennée12e97002016-10-27 16:10:14 +01001355 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
Paolo Bonzini6b8f0182017-03-02 19:56:40 +01001356 qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
Alex Bennée12e97002016-10-27 16:10:14 +01001357 }
1358 }
1359}
1360
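/* The instruction budget is handed to the generated code in two parts:
 * the 16-bit cpu->icount_decr.u16.low counter that translated code
 * decrements directly, and cpu->icount_extra for whatever does not fit
 * into 16 bits, i.e. on entry to the vCPU:
 *
 *     icount_budget == icount_decr.u16.low + icount_extra
 */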
Alex Bennée05248382017-03-29 16:46:59 +01001361static void prepare_icount_for_run(CPUState *cpu)
1362{
1363 if (use_icount) {
Alex Bennéeeda5f7c2017-04-05 12:35:48 +01001364 int insns_left;
Alex Bennée05248382017-03-29 16:46:59 +01001365
1366 /* These should always be cleared by process_icount_data after
1367 * each vCPU execution. However u16.high can be raised
1368 * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
1369 */
1370 g_assert(cpu->icount_decr.u16.low == 0);
1371 g_assert(cpu->icount_extra == 0);
1372
Alex Bennéeeda5f7c2017-04-05 12:35:48 +01001373 cpu->icount_budget = tcg_get_icount_limit();
1374 insns_left = MIN(0xffff, cpu->icount_budget);
1375 cpu->icount_decr.u16.low = insns_left;
1376 cpu->icount_extra = cpu->icount_budget - insns_left;
Alex Bennéed759c952018-02-27 12:52:48 +03001377
1378 replay_mutex_lock();
Alex Bennée05248382017-03-29 16:46:59 +01001379 }
1380}
1381
1382static void process_icount_data(CPUState *cpu)
1383{
1384 if (use_icount) {
Alex Bennéee4cd9652017-03-31 16:09:42 +01001385 /* Account for executed instructions */
Alex Bennée512d3c82017-04-05 12:32:37 +01001386 cpu_update_icount(cpu);
Alex Bennée05248382017-03-29 16:46:59 +01001387
1388 /* Reset the counters */
1389 cpu->icount_decr.u16.low = 0;
1390 cpu->icount_extra = 0;
Alex Bennéee4cd9652017-03-31 16:09:42 +01001391 cpu->icount_budget = 0;
1392
Alex Bennée05248382017-03-29 16:46:59 +01001393 replay_account_executed_instructions();
Alex Bennéed759c952018-02-27 12:52:48 +03001394
1395 replay_mutex_unlock();
Alex Bennée05248382017-03-29 16:46:59 +01001396 }
1397}
1398
1399
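/* Run generated code on one vCPU, bracketed by cpu_exec_start() and
 * cpu_exec_end() so that exclusive sections (start_exclusive()) can
 * synchronise with the running vCPU.
 */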
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001400static int tcg_cpu_exec(CPUState *cpu)
1401{
1402 int ret;
1403#ifdef CONFIG_PROFILER
1404 int64_t ti;
1405#endif
1406
Emilio G. Cotaf28d0df2018-06-22 13:45:31 -04001407 assert(tcg_enabled());
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001408#ifdef CONFIG_PROFILER
1409 ti = profile_getclock();
1410#endif
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001411 cpu_exec_start(cpu);
1412 ret = cpu_exec(cpu);
1413 cpu_exec_end(cpu);
1414#ifdef CONFIG_PROFILER
1415 tcg_time += profile_getclock() - ti;
1416#endif
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001417 return ret;
1418}
1419
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001420/* Destroy any remaining vCPUs that have been unplugged and have
1421 * finished running.
1422 */
1423static void deal_with_unplugged_cpus(void)
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001424{
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001425 CPUState *cpu;
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001426
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001427 CPU_FOREACH(cpu) {
1428 if (cpu->unplug && !cpu_can_run(cpu)) {
1429 qemu_tcg_destroy_vcpu(cpu);
1430 cpu->created = false;
1431 qemu_cond_signal(&qemu_cpu_cond);
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001432 break;
1433 }
1434 }
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001435}
Jan Kiszkabdb7ca62011-09-26 09:40:39 +02001436
Alex Bennée65467062017-02-23 18:29:09 +00001437/* Single-threaded TCG
1438 *
1439 * In the single-threaded case each vCPU is simulated in turn. If
1440 * there is more than a single vCPU we create a simple timer to kick
1441 * the running vCPU so we don't get stuck in a tight loop in one vCPU.
1442 * This is done explicitly rather than relying on side-effects
1443 * elsewhere.
1444 */
1445
Alex Bennée37257942017-02-23 18:29:14 +00001446static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
Blue Swirl296af7c2010-03-29 19:23:50 +00001447{
Andreas Färberc3586ba2012-05-03 01:41:24 +02001448 CPUState *cpu = arg;
Blue Swirl296af7c2010-03-29 19:23:50 +00001449
Emilio G. Cotaf28d0df2018-06-22 13:45:31 -04001450 assert(tcg_enabled());
Paolo Bonziniab28bd22015-07-09 08:55:38 +02001451 rcu_register_thread();
Emilio G. Cota3468b592017-07-19 18:57:58 -04001452 tcg_register_thread();
Paolo Bonziniab28bd22015-07-09 08:55:38 +02001453
Paolo Bonzini2e7f7a32015-06-18 18:47:18 +02001454 qemu_mutex_lock_iothread();
Andreas Färber814e6122012-05-02 17:00:37 +02001455 qemu_thread_get_self(cpu->thread);
Blue Swirl296af7c2010-03-29 19:23:50 +00001456
David Hildenbrand5a9c9732018-02-09 20:52:39 +01001457 cpu->thread_id = qemu_get_thread_id();
1458 cpu->created = true;
1459 cpu->can_do_io = 1;
Blue Swirl296af7c2010-03-29 19:23:50 +00001460 qemu_cond_signal(&qemu_cpu_cond);
1461
Jan Kiszkafa7d1862011-08-22 18:35:25 +02001462 /* wait for initial kick-off after machine start */
Emilio G. Cotac28e3992015-04-27 12:45:28 -04001463 while (first_cpu->stopped) {
KONRAD Fredericd5f8d612015-08-10 17:27:06 +02001464 qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
Jan Kiszka8e564b42012-02-17 18:31:15 +01001465
1466 /* process any pending work */
Andreas Färberbdc44642013-06-24 23:50:24 +02001467 CPU_FOREACH(cpu) {
Alex Bennée37257942017-02-23 18:29:14 +00001468 current_cpu = cpu;
Andreas Färber182735e2013-05-29 22:29:20 +02001469 qemu_wait_io_event_common(cpu);
Jan Kiszka8e564b42012-02-17 18:31:15 +01001470 }
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001471 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001472
Alex Bennée65467062017-02-23 18:29:09 +00001473 start_tcg_kick_timer();
1474
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001475 cpu = first_cpu;
1476
Alex Bennéee5143e32017-02-23 18:29:12 +00001477    /* Start with an exit request so pending work is processed first. */
1478 cpu->exit_request = 1;
1479
Blue Swirl296af7c2010-03-29 19:23:50 +00001480 while (1) {
Alex Bennéed759c952018-02-27 12:52:48 +03001481 qemu_mutex_unlock_iothread();
1482 replay_mutex_lock();
1483 qemu_mutex_lock_iothread();
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001484 /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
1485 qemu_account_warp_timer();
1486
Paolo Bonzini6b8f0182017-03-02 19:56:40 +01001487 /* Run the timers here. This is much more efficient than
1488 * waking up the I/O thread and waiting for completion.
1489 */
1490 handle_icount_deadline();
1491
Alex Bennéed759c952018-02-27 12:52:48 +03001492 replay_mutex_unlock();
1493
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001494 if (!cpu) {
1495 cpu = first_cpu;
1496 }
1497
Alex Bennéee5143e32017-02-23 18:29:12 +00001498 while (cpu && !cpu->queued_work_first && !cpu->exit_request) {
1499
Alex Bennée791158d2017-02-23 18:29:10 +00001500 atomic_mb_set(&tcg_current_rr_cpu, cpu);
Alex Bennée37257942017-02-23 18:29:14 +00001501 current_cpu = cpu;
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001502
1503 qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
1504 (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
1505
1506 if (cpu_can_run(cpu)) {
1507 int r;
Alex Bennée05248382017-03-29 16:46:59 +01001508
Alex Bennéed759c952018-02-27 12:52:48 +03001509 qemu_mutex_unlock_iothread();
Alex Bennée05248382017-03-29 16:46:59 +01001510 prepare_icount_for_run(cpu);
1511
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001512 r = tcg_cpu_exec(cpu);
Alex Bennée05248382017-03-29 16:46:59 +01001513
1514 process_icount_data(cpu);
Alex Bennéed759c952018-02-27 12:52:48 +03001515 qemu_mutex_lock_iothread();
Alex Bennée05248382017-03-29 16:46:59 +01001516
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001517 if (r == EXCP_DEBUG) {
1518 cpu_handle_guest_debug(cpu);
1519 break;
Pranith Kumar08e73c42017-02-23 18:29:15 +00001520 } else if (r == EXCP_ATOMIC) {
1521 qemu_mutex_unlock_iothread();
1522 cpu_exec_step_atomic(cpu);
1523 qemu_mutex_lock_iothread();
1524 break;
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001525 }
Alex Bennée37257942017-02-23 18:29:14 +00001526 } else if (cpu->stop) {
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001527 if (cpu->unplug) {
1528 cpu = CPU_NEXT(cpu);
1529 }
1530 break;
1531 }
1532
Alex Bennéee5143e32017-02-23 18:29:12 +00001533 cpu = CPU_NEXT(cpu);
1534        } /* while (cpu && !cpu->queued_work_first && !cpu->exit_request) */
1535
Alex Bennée791158d2017-02-23 18:29:10 +00001536 /* Does not need atomic_mb_set because a spurious wakeup is okay. */
1537 atomic_set(&tcg_current_rr_cpu, NULL);
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001538
Alex Bennéee5143e32017-02-23 18:29:12 +00001539 if (cpu && cpu->exit_request) {
1540 atomic_mb_set(&cpu->exit_request, 0);
1541 }
Alex Blighac70aaf2013-08-21 16:02:57 +01001542
Emilio G. Cota068a5ea2018-08-19 05:13:35 -04001543 qemu_tcg_rr_wait_io_event(cpu ? cpu : first_cpu);
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001544 deal_with_unplugged_cpus();
Blue Swirl296af7c2010-03-29 19:23:50 +00001545 }
1546
Paolo Bonzini9b0605f2018-01-30 11:05:06 -05001547 rcu_unregister_thread();
Blue Swirl296af7c2010-03-29 19:23:50 +00001548 return NULL;
1549}
1550
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001551static void *qemu_hax_cpu_thread_fn(void *arg)
1552{
1553 CPUState *cpu = arg;
1554 int r;
Vincent Palatinb3d3a422017-03-20 11:15:49 +01001555
Paolo Bonzini9857c2d2018-01-30 16:28:49 +01001556 rcu_register_thread();
Vincent Palatinb3d3a422017-03-20 11:15:49 +01001557 qemu_mutex_lock_iothread();
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001558 qemu_thread_get_self(cpu->thread);
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001559
1560 cpu->thread_id = qemu_get_thread_id();
1561 cpu->created = true;
1562 cpu->halted = 0;
1563 current_cpu = cpu;
1564
1565 hax_init_vcpu(cpu);
1566 qemu_cond_signal(&qemu_cpu_cond);
1567
Paolo Bonzini9857c2d2018-01-30 16:28:49 +01001568 do {
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001569 if (cpu_can_run(cpu)) {
1570 r = hax_smp_cpu_exec(cpu);
1571 if (r == EXCP_DEBUG) {
1572 cpu_handle_guest_debug(cpu);
1573 }
1574 }
1575
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001576 qemu_wait_io_event(cpu);
Paolo Bonzini9857c2d2018-01-30 16:28:49 +01001577 } while (!cpu->unplug || cpu_can_run(cpu));
1578 rcu_unregister_thread();
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001579 return NULL;
1580}
1581
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05001582/* The HVF-specific vCPU thread function. This one should only run
1583 * when the host CPU supports the VMX "unrestricted guest" feature.
1584 */
1584static void *qemu_hvf_cpu_thread_fn(void *arg)
1585{
1586 CPUState *cpu = arg;
1587
1588 int r;
1589
1590 assert(hvf_enabled());
1591
1592 rcu_register_thread();
1593
1594 qemu_mutex_lock_iothread();
1595 qemu_thread_get_self(cpu->thread);
1596
1597 cpu->thread_id = qemu_get_thread_id();
1598 cpu->can_do_io = 1;
1599 current_cpu = cpu;
1600
1601 hvf_init_vcpu(cpu);
1602
1603 /* signal CPU creation */
1604 cpu->created = true;
1605 qemu_cond_signal(&qemu_cpu_cond);
1606
1607 do {
1608 if (cpu_can_run(cpu)) {
1609 r = hvf_vcpu_exec(cpu);
1610 if (r == EXCP_DEBUG) {
1611 cpu_handle_guest_debug(cpu);
1612 }
1613 }
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001614 qemu_wait_io_event(cpu);
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05001615 } while (!cpu->unplug || cpu_can_run(cpu));
1616
1617 hvf_vcpu_destroy(cpu);
1618 cpu->created = false;
1619 qemu_cond_signal(&qemu_cpu_cond);
1620 qemu_mutex_unlock_iothread();
Paolo Bonzini8178e632018-01-30 11:05:21 -05001621 rcu_unregister_thread();
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05001622 return NULL;
1623}
1624
Justin Terry (VM)19306802018-01-22 13:07:49 -08001625static void *qemu_whpx_cpu_thread_fn(void *arg)
1626{
1627 CPUState *cpu = arg;
1628 int r;
1629
1630 rcu_register_thread();
1631
1632 qemu_mutex_lock_iothread();
1633 qemu_thread_get_self(cpu->thread);
1634 cpu->thread_id = qemu_get_thread_id();
1635 current_cpu = cpu;
1636
1637 r = whpx_init_vcpu(cpu);
1638 if (r < 0) {
1639        error_report("whpx_init_vcpu failed: %s", strerror(-r));
1640 exit(1);
1641 }
1642
1643 /* signal CPU creation */
1644 cpu->created = true;
1645 qemu_cond_signal(&qemu_cpu_cond);
1646
1647 do {
1648 if (cpu_can_run(cpu)) {
1649 r = whpx_vcpu_exec(cpu);
1650 if (r == EXCP_DEBUG) {
1651 cpu_handle_guest_debug(cpu);
1652 }
1653 }
1654 while (cpu_thread_is_idle(cpu)) {
1655 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
1656 }
1657 qemu_wait_io_event_common(cpu);
1658 } while (!cpu->unplug || cpu_can_run(cpu));
1659
1660 whpx_destroy_vcpu(cpu);
1661 cpu->created = false;
1662 qemu_cond_signal(&qemu_cpu_cond);
1663 qemu_mutex_unlock_iothread();
1664 rcu_unregister_thread();
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001665 return NULL;
1666}
1667
1668#ifdef _WIN32
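/* Intentionally empty: qemu_cpu_kick_thread() queues this APC purely to
 * interrupt an alertable wait in the vCPU thread; any APC still pending
 * is eaten by the SleepEx() call in qemu_wait_io_event().
 */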
1669static void CALLBACK dummy_apc_func(ULONG_PTR unused)
1670{
1671}
1672#endif
1673
Alex Bennée37257942017-02-23 18:29:14 +00001674/* Multi-threaded TCG
1675 *
1676 * In the multi-threaded case each vCPU has its own thread. The TLS
1677 * variable current_cpu can be used deep in the code to find the
1678 * current CPUState for a given thread.
1679 */
1680
1681static void *qemu_tcg_cpu_thread_fn(void *arg)
1682{
1683 CPUState *cpu = arg;
1684
Emilio G. Cotaf28d0df2018-06-22 13:45:31 -04001685 assert(tcg_enabled());
Alex Bennéebf51c722017-03-30 18:32:29 +01001686 g_assert(!use_icount);
1687
Alex Bennée37257942017-02-23 18:29:14 +00001688 rcu_register_thread();
Emilio G. Cota3468b592017-07-19 18:57:58 -04001689 tcg_register_thread();
Alex Bennée37257942017-02-23 18:29:14 +00001690
1691 qemu_mutex_lock_iothread();
1692 qemu_thread_get_self(cpu->thread);
1693
1694 cpu->thread_id = qemu_get_thread_id();
1695 cpu->created = true;
1696 cpu->can_do_io = 1;
1697 current_cpu = cpu;
1698 qemu_cond_signal(&qemu_cpu_cond);
1699
1700 /* process any pending work */
1701    /* Start with an exit request so pending work is processed first. */
1702
Cédric Le Goater54961aa2018-04-25 15:18:28 +02001703 do {
Alex Bennée37257942017-02-23 18:29:14 +00001704 if (cpu_can_run(cpu)) {
1705 int r;
Alex Bennéed759c952018-02-27 12:52:48 +03001706 qemu_mutex_unlock_iothread();
Alex Bennée37257942017-02-23 18:29:14 +00001707 r = tcg_cpu_exec(cpu);
Alex Bennéed759c952018-02-27 12:52:48 +03001708 qemu_mutex_lock_iothread();
Alex Bennée37257942017-02-23 18:29:14 +00001709 switch (r) {
1710 case EXCP_DEBUG:
1711 cpu_handle_guest_debug(cpu);
1712 break;
1713 case EXCP_HALTED:
1714                /* During start-up the vCPU is reset and the thread is
1715                 * kicked several times. If we don't ensure we go back
1716                 * to sleep in the halted state we won't start up cleanly
1717                 * when the vCPU is enabled.
1718                 *
1719                 * cpu->halted should ensure we sleep in qemu_wait_io_event()
1720 */
1721 g_assert(cpu->halted);
1722 break;
Pranith Kumar08e73c42017-02-23 18:29:15 +00001723 case EXCP_ATOMIC:
1724 qemu_mutex_unlock_iothread();
1725 cpu_exec_step_atomic(cpu);
1726                qemu_mutex_lock_iothread();
                break;
Alex Bennée37257942017-02-23 18:29:14 +00001727            default:
1728                /* Ignore everything else. */
1729 break;
1730 }
1731 }
1732
Alex Bennée37257942017-02-23 18:29:14 +00001733 atomic_mb_set(&cpu->exit_request, 0);
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001734 qemu_wait_io_event(cpu);
Paolo Bonzini9b0605f2018-01-30 11:05:06 -05001735 } while (!cpu->unplug || cpu_can_run(cpu));
Alex Bennée37257942017-02-23 18:29:14 +00001736
Paolo Bonzini9b0605f2018-01-30 11:05:06 -05001737 qemu_tcg_destroy_vcpu(cpu);
1738 cpu->created = false;
1739 qemu_cond_signal(&qemu_cpu_cond);
1740 qemu_mutex_unlock_iothread();
1741 rcu_unregister_thread();
Alex Bennée37257942017-02-23 18:29:14 +00001742 return NULL;
1743}
1744
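/* Force the vCPU thread out of the hypervisor or guest: with SIG_IPI on
 * POSIX hosts, and on Windows with either a WHPX kick or a dummy APC.
 */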
Andreas Färber2ff09a42012-05-03 00:23:30 +02001745static void qemu_cpu_kick_thread(CPUState *cpu)
Paolo Bonzinicc015e92011-03-12 17:44:08 +01001746{
1747#ifndef _WIN32
1748 int err;
1749
Paolo Bonzinie0c38212015-08-26 00:19:19 +02001750 if (cpu->thread_kicked) {
1751 return;
Paolo Bonzini9102ded2015-08-18 06:52:09 -07001752 }
Paolo Bonzinie0c38212015-08-26 00:19:19 +02001753 cpu->thread_kicked = true;
Andreas Färber814e6122012-05-02 17:00:37 +02001754 err = pthread_kill(cpu->thread->thread, SIG_IPI);
Paolo Bonzinicc015e92011-03-12 17:44:08 +01001755 if (err) {
1756        fprintf(stderr, "qemu:%s: %s\n", __func__, strerror(err));
1757 exit(1);
1758 }
1759#else /* _WIN32 */
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001760 if (!qemu_cpu_is_self(cpu)) {
Justin Terry (VM)19306802018-01-22 13:07:49 -08001761 if (whpx_enabled()) {
1762 whpx_vcpu_kick(cpu);
1763 } else if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) {
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001764 fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n",
1765 __func__, GetLastError());
1766 exit(1);
1767 }
1768 }
Paolo Bonzinicc015e92011-03-12 17:44:08 +01001769#endif
1770}
1771
Andreas Färberc08d7422012-05-03 04:34:15 +02001772void qemu_cpu_kick(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001773{
Andreas Färberf5c121b2012-05-03 01:22:49 +02001774 qemu_cond_broadcast(cpu->halt_cond);
Paolo Bonzinie0c38212015-08-26 00:19:19 +02001775 if (tcg_enabled()) {
Alex Bennée791158d2017-02-23 18:29:10 +00001776 cpu_exit(cpu);
Alex Bennée37257942017-02-23 18:29:14 +00001777 /* NOP unless doing single-thread RR */
Alex Bennée791158d2017-02-23 18:29:10 +00001778 qemu_cpu_kick_rr_cpu();
Paolo Bonzinie0c38212015-08-26 00:19:19 +02001779 } else {
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001780 if (hax_enabled()) {
1781 /*
1782 * FIXME: race condition with the exit_request check in
1783 * hax_vcpu_hax_exec
1784 */
1785 cpu->exit_request = 1;
1786 }
Paolo Bonzinie0c38212015-08-26 00:19:19 +02001787 qemu_cpu_kick_thread(cpu);
1788 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001789}
1790
Jan Kiszka46d62fa2011-02-01 22:15:59 +01001791void qemu_cpu_kick_self(void)
1792{
Andreas Färber4917cf42013-05-27 05:17:50 +02001793 assert(current_cpu);
Paolo Bonzini9102ded2015-08-18 06:52:09 -07001794 qemu_cpu_kick_thread(current_cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001795}
1796
Andreas Färber60e82572012-05-02 22:23:49 +02001797bool qemu_cpu_is_self(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001798{
Andreas Färber814e6122012-05-02 17:00:37 +02001799 return qemu_thread_is_self(cpu->thread);
Blue Swirl296af7c2010-03-29 19:23:50 +00001800}
1801
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01001802bool qemu_in_vcpu_thread(void)
Juan Quintelaaa723c22012-09-18 16:30:11 +02001803{
Andreas Färber4917cf42013-05-27 05:17:50 +02001804 return current_cpu && qemu_cpu_is_self(current_cpu);
Juan Quintelaaa723c22012-09-18 16:30:11 +02001805}
1806
Paolo Bonziniafbe7052015-06-18 18:47:19 +02001807static __thread bool iothread_locked = false;
1808
1809bool qemu_mutex_iothread_locked(void)
1810{
1811 return iothread_locked;
1812}
1813
Emilio G. Cotacb764d02017-10-28 02:16:41 -04001814/*
1815 * The BQL is taken from so many places that it is worth profiling the
1816 * callers directly, instead of funneling them all through a single function.
1817 */
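/* Callers are expected to come in through the qemu_mutex_lock_iothread()
 * macro, which passes the call site's __FILE__ and __LINE__ down here.
 */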
1818void qemu_mutex_lock_iothread_impl(const char *file, int line)
Blue Swirl296af7c2010-03-29 19:23:50 +00001819{
Emilio G. Cotacb764d02017-10-28 02:16:41 -04001820 QemuMutexLockFunc bql_lock = atomic_read(&qemu_bql_mutex_lock_func);
1821
Jan Kiszka8d04fb52017-02-23 18:29:11 +00001822 g_assert(!qemu_mutex_iothread_locked());
Emilio G. Cotacb764d02017-10-28 02:16:41 -04001823 bql_lock(&qemu_global_mutex, file, line);
Paolo Bonziniafbe7052015-06-18 18:47:19 +02001824 iothread_locked = true;
Blue Swirl296af7c2010-03-29 19:23:50 +00001825}
1826
1827void qemu_mutex_unlock_iothread(void)
1828{
Jan Kiszka8d04fb52017-02-23 18:29:11 +00001829 g_assert(qemu_mutex_iothread_locked());
Paolo Bonziniafbe7052015-06-18 18:47:19 +02001830 iothread_locked = false;
Blue Swirl296af7c2010-03-29 19:23:50 +00001831 qemu_mutex_unlock(&qemu_global_mutex);
1832}
1833
Alex Bennéee8faee02016-10-27 16:09:58 +01001834static bool all_vcpus_paused(void)
Blue Swirl296af7c2010-03-29 19:23:50 +00001835{
Andreas Färberbdc44642013-06-24 23:50:24 +02001836 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001837
Andreas Färberbdc44642013-06-24 23:50:24 +02001838 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001839 if (!cpu->stopped) {
Alex Bennéee8faee02016-10-27 16:09:58 +01001840 return false;
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001841 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001842 }
1843
Alex Bennéee8faee02016-10-27 16:09:58 +01001844 return true;
Blue Swirl296af7c2010-03-29 19:23:50 +00001845}
1846
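/* Stop all vCPUs and wait until every one has reported itself stopped.
 * Must be called with the BQL held. A remote vCPU is stopped by asking it
 * to stop itself and kicking it, as in the loop below:
 *
 *     cpu->stop = true;
 *     qemu_cpu_kick(cpu);
 */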
1847void pause_all_vcpus(void)
1848{
Andreas Färberbdc44642013-06-24 23:50:24 +02001849 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001850
Alex Bligh40daca52013-08-21 16:03:02 +01001851 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
Andreas Färberbdc44642013-06-24 23:50:24 +02001852 CPU_FOREACH(cpu) {
David Hildenbrandebd05fe2017-11-29 20:12:15 +01001853 if (qemu_cpu_is_self(cpu)) {
1854 qemu_cpu_stop(cpu, true);
1855 } else {
1856 cpu->stop = true;
1857 qemu_cpu_kick(cpu);
1858 }
Jan Kiszkad798e972012-02-17 18:31:16 +01001859 }
1860
Alex Bennéed759c952018-02-27 12:52:48 +03001861 /* We need to drop the replay_lock so any vCPU threads woken up
1862     * can finish their replay tasks.
1863     */
1864 replay_mutex_unlock();
1865
Blue Swirl296af7c2010-03-29 19:23:50 +00001866 while (!all_vcpus_paused()) {
Paolo Bonzinibe7d6c52011-03-12 17:44:02 +01001867 qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
Andreas Färberbdc44642013-06-24 23:50:24 +02001868 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001869 qemu_cpu_kick(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001870 }
1871 }
Alex Bennéed759c952018-02-27 12:52:48 +03001872
1873 qemu_mutex_unlock_iothread();
1874 replay_mutex_lock();
1875 qemu_mutex_lock_iothread();
Blue Swirl296af7c2010-03-29 19:23:50 +00001876}
1877
Igor Mammedov29936832013-04-23 10:29:37 +02001878void cpu_resume(CPUState *cpu)
1879{
1880 cpu->stop = false;
1881 cpu->stopped = false;
1882 qemu_cpu_kick(cpu);
1883}
1884
Blue Swirl296af7c2010-03-29 19:23:50 +00001885void resume_all_vcpus(void)
1886{
Andreas Färberbdc44642013-06-24 23:50:24 +02001887 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001888
Alex Bligh40daca52013-08-21 16:03:02 +01001889 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
Andreas Färberbdc44642013-06-24 23:50:24 +02001890 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001891 cpu_resume(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001892 }
1893}
1894
Paolo Bonzinidbadee42018-01-30 16:40:12 +01001895void cpu_remove_sync(CPUState *cpu)
Gu Zheng4c055ab2016-05-12 09:18:13 +05301896{
1897 cpu->stop = true;
1898 cpu->unplug = true;
1899 qemu_cpu_kick(cpu);
Paolo Bonzinidbadee42018-01-30 16:40:12 +01001900 qemu_mutex_unlock_iothread();
1901 qemu_thread_join(cpu->thread);
1902 qemu_mutex_lock_iothread();
Bharata B Rao2c579042016-05-12 09:18:14 +05301903}
1904
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001905/* For temporary buffers for forming a name */
1906#define VCPU_THREAD_NAME_SIZE 16
1907
Andreas Färbere5ab30a2012-05-03 01:50:44 +02001908static void qemu_tcg_init_vcpu(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001909{
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001910 char thread_name[VCPU_THREAD_NAME_SIZE];
Alex Bennée37257942017-02-23 18:29:14 +00001911 static QemuCond *single_tcg_halt_cond;
1912 static QemuThread *single_tcg_cpu_thread;
Emilio G. Cotae8feb962017-07-07 19:24:20 -04001913 static int tcg_region_inited;
1914
Emilio G. Cotaf28d0df2018-06-22 13:45:31 -04001915 assert(tcg_enabled());
Emilio G. Cotae8feb962017-07-07 19:24:20 -04001916 /*
1917 * Initialize TCG regions--once. Now is a good time, because:
1918 * (1) TCG's init context, prologue and target globals have been set up.
1919 * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the
1920 * -accel flag is processed, so the check doesn't work then).
1921 */
1922 if (!tcg_region_inited) {
1923 tcg_region_inited = 1;
1924 tcg_region_init();
1925 }
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001926
Alex Bennée37257942017-02-23 18:29:14 +00001927 if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
Andreas Färber814e6122012-05-02 17:00:37 +02001928 cpu->thread = g_malloc0(sizeof(QemuThread));
Andreas Färberf5c121b2012-05-03 01:22:49 +02001929 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1930 qemu_cond_init(cpu->halt_cond);
Alex Bennée37257942017-02-23 18:29:14 +00001931
1932 if (qemu_tcg_mttcg_enabled()) {
1933 /* create a thread per vCPU with TCG (MTTCG) */
1934 parallel_cpus = true;
1935 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001936 cpu->cpu_index);
Alex Bennée37257942017-02-23 18:29:14 +00001937
1938 qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
1939 cpu, QEMU_THREAD_JOINABLE);
1940
1941 } else {
1942 /* share a single thread for all cpus with TCG */
1943 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
1944 qemu_thread_create(cpu->thread, thread_name,
1945 qemu_tcg_rr_cpu_thread_fn,
1946 cpu, QEMU_THREAD_JOINABLE);
1947
1948 single_tcg_halt_cond = cpu->halt_cond;
1949 single_tcg_cpu_thread = cpu->thread;
1950 }
Paolo Bonzini1ecf47b2011-12-13 13:43:52 +01001951#ifdef _WIN32
Andreas Färber814e6122012-05-02 17:00:37 +02001952 cpu->hThread = qemu_thread_get_handle(cpu->thread);
Paolo Bonzini1ecf47b2011-12-13 13:43:52 +01001953#endif
Blue Swirl296af7c2010-03-29 19:23:50 +00001954 } else {
Alex Bennée37257942017-02-23 18:29:14 +00001955 /* For non-MTTCG cases we share the thread */
1956 cpu->thread = single_tcg_cpu_thread;
1957 cpu->halt_cond = single_tcg_halt_cond;
David Hildenbranda3421732018-02-09 20:52:37 +01001958 cpu->thread_id = first_cpu->thread_id;
1959 cpu->can_do_io = 1;
1960 cpu->created = true;
Blue Swirl296af7c2010-03-29 19:23:50 +00001961 }
1962}
1963
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001964static void qemu_hax_start_vcpu(CPUState *cpu)
1965{
1966 char thread_name[VCPU_THREAD_NAME_SIZE];
1967
1968 cpu->thread = g_malloc0(sizeof(QemuThread));
1969 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1970 qemu_cond_init(cpu->halt_cond);
1971
1972 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX",
1973 cpu->cpu_index);
1974 qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn,
1975 cpu, QEMU_THREAD_JOINABLE);
1976#ifdef _WIN32
1977 cpu->hThread = qemu_thread_get_handle(cpu->thread);
1978#endif
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001979}
1980
Andreas Färber48a106b2013-05-27 02:20:39 +02001981static void qemu_kvm_start_vcpu(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001982{
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001983 char thread_name[VCPU_THREAD_NAME_SIZE];
1984
Andreas Färber814e6122012-05-02 17:00:37 +02001985 cpu->thread = g_malloc0(sizeof(QemuThread));
Andreas Färberf5c121b2012-05-03 01:22:49 +02001986 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1987 qemu_cond_init(cpu->halt_cond);
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001988 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
1989 cpu->cpu_index);
1990 qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
1991 cpu, QEMU_THREAD_JOINABLE);
Blue Swirl296af7c2010-03-29 19:23:50 +00001992}
1993
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05001994static void qemu_hvf_start_vcpu(CPUState *cpu)
1995{
1996 char thread_name[VCPU_THREAD_NAME_SIZE];
1997
1998 /* HVF currently does not support TCG, and only runs in
1999 * unrestricted-guest mode. */
2000 assert(hvf_enabled());
2001
2002 cpu->thread = g_malloc0(sizeof(QemuThread));
2003 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
2004 qemu_cond_init(cpu->halt_cond);
2005
2006 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
2007 cpu->cpu_index);
2008 qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn,
2009 cpu, QEMU_THREAD_JOINABLE);
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05002010}
2011
Justin Terry (VM)19306802018-01-22 13:07:49 -08002012static void qemu_whpx_start_vcpu(CPUState *cpu)
2013{
2014 char thread_name[VCPU_THREAD_NAME_SIZE];
2015
2016 cpu->thread = g_malloc0(sizeof(QemuThread));
2017 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
2018 qemu_cond_init(cpu->halt_cond);
2019 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX",
2020 cpu->cpu_index);
2021 qemu_thread_create(cpu->thread, thread_name, qemu_whpx_cpu_thread_fn,
2022 cpu, QEMU_THREAD_JOINABLE);
2023#ifdef _WIN32
2024 cpu->hThread = qemu_thread_get_handle(cpu->thread);
2025#endif
Justin Terry (VM)19306802018-01-22 13:07:49 -08002026}
2027
Andreas Färber10a90212013-05-27 02:24:35 +02002028static void qemu_dummy_start_vcpu(CPUState *cpu)
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02002029{
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00002030 char thread_name[VCPU_THREAD_NAME_SIZE];
2031
Andreas Färber814e6122012-05-02 17:00:37 +02002032 cpu->thread = g_malloc0(sizeof(QemuThread));
Andreas Färberf5c121b2012-05-03 01:22:49 +02002033 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
2034 qemu_cond_init(cpu->halt_cond);
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00002035 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
2036 cpu->cpu_index);
2037 qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02002038 QEMU_THREAD_JOINABLE);
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02002039}
2040
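/* Create the vCPU thread for whichever accelerator is active (KVM, HAX,
 * HVF, TCG, WHPX, or the dummy loop used by qtest) and block until that
 * thread signals cpu->created.
 */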
Andreas Färberc643bed2013-05-27 03:23:24 +02002041void qemu_init_vcpu(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00002042{
Andreas Färberce3960e2012-12-17 03:27:07 +01002043 cpu->nr_cores = smp_cores;
2044 cpu->nr_threads = smp_threads;
Andreas Färberf324e762012-05-02 23:26:21 +02002045 cpu->stopped = true;
Peter Maydell56943e82016-01-21 14:15:04 +00002046
2047 if (!cpu->as) {
2048 /* If the target cpu hasn't set up any address spaces itself,
2049 * give it the default one.
2050 */
Peter Maydell12ebc9a2016-01-21 14:15:04 +00002051 cpu->num_ases = 1;
Peter Xu80ceb072017-11-23 17:23:32 +08002052 cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
Peter Maydell56943e82016-01-21 14:15:04 +00002053 }
2054
Jan Kiszka0ab07c62011-02-07 12:19:14 +01002055 if (kvm_enabled()) {
Andreas Färber48a106b2013-05-27 02:20:39 +02002056 qemu_kvm_start_vcpu(cpu);
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01002057 } else if (hax_enabled()) {
2058 qemu_hax_start_vcpu(cpu);
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05002059 } else if (hvf_enabled()) {
2060 qemu_hvf_start_vcpu(cpu);
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02002061 } else if (tcg_enabled()) {
Andreas Färbere5ab30a2012-05-03 01:50:44 +02002062 qemu_tcg_init_vcpu(cpu);
Justin Terry (VM)19306802018-01-22 13:07:49 -08002063 } else if (whpx_enabled()) {
2064 qemu_whpx_start_vcpu(cpu);
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02002065 } else {
Andreas Färber10a90212013-05-27 02:24:35 +02002066 qemu_dummy_start_vcpu(cpu);
Jan Kiszka0ab07c62011-02-07 12:19:14 +01002067 }
David Hildenbrand81e96312018-02-09 20:52:38 +01002068
2069 while (!cpu->created) {
2070 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
2071 }
Blue Swirl296af7c2010-03-29 19:23:50 +00002072}
2073
Jan Kiszkab4a3d962011-02-01 22:15:43 +01002074void cpu_stop_current(void)
Blue Swirl296af7c2010-03-29 19:23:50 +00002075{
Andreas Färber4917cf42013-05-27 05:17:50 +02002076 if (current_cpu) {
David Hildenbrandebd05fe2017-11-29 20:12:15 +01002077 qemu_cpu_stop(current_cpu, true);
Jan Kiszkab4a3d962011-02-01 22:15:43 +01002078 }
Blue Swirl296af7c2010-03-29 19:23:50 +00002079}
2080
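/* Request a VM stop. From a vCPU thread the stop is only queued: the vCPU
 * stops itself and the main loop completes the transition later. From any
 * other thread the VM is stopped synchronously.
 */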
Kevin Wolf56983462013-07-05 13:49:54 +02002081int vm_stop(RunState state)
Blue Swirl296af7c2010-03-29 19:23:50 +00002082{
Juan Quintelaaa723c22012-09-18 16:30:11 +02002083 if (qemu_in_vcpu_thread()) {
Paolo Bonzini74892d22014-06-05 14:53:58 +02002084 qemu_system_vmstop_request_prepare();
Luiz Capitulino1dfb4dd2011-07-29 14:26:33 -03002085 qemu_system_vmstop_request(state);
Blue Swirl296af7c2010-03-29 19:23:50 +00002086 /*
2087 * FIXME: should not return to device code in case
2088 * vm_stop() has been requested.
2089 */
Jan Kiszkab4a3d962011-02-01 22:15:43 +01002090 cpu_stop_current();
Kevin Wolf56983462013-07-05 13:49:54 +02002091 return 0;
Blue Swirl296af7c2010-03-29 19:23:50 +00002092 }
Kevin Wolf56983462013-07-05 13:49:54 +02002093
Stefan Hajnoczi4486e892018-03-07 14:42:05 +00002094 return do_vm_stop(state, true);
Blue Swirl296af7c2010-03-29 19:23:50 +00002095}
2096
Claudio Imbrenda2d76e822017-02-14 18:07:47 +01002097/**
2098 * Prepare for (re)starting the VM.
2099 * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
2100 * running or in case of an error condition), 0 otherwise.
2101 */
2102int vm_prepare_start(void)
2103{
2104 RunState requested;
Claudio Imbrenda2d76e822017-02-14 18:07:47 +01002105
2106 qemu_vmstop_requested(&requested);
2107 if (runstate_is_running() && requested == RUN_STATE__MAX) {
2108 return -1;
2109 }
2110
2111 /* Ensure that a STOP/RESUME pair of events is emitted if a
2112     * vmstop request was pending. The BLOCK_IO_ERROR event, for
2113     * example, is documented to always be followed by the STOP
2114     * event.
2115 */
2116 if (runstate_is_running()) {
Peter Xu3ab72382018-08-15 21:37:37 +08002117 qapi_event_send_stop();
2118 qapi_event_send_resume();
Markus Armbrusterf0561582018-04-23 10:45:18 +02002119 return -1;
Claudio Imbrenda2d76e822017-02-14 18:07:47 +01002120 }
2121
2122 /* We are sending this now, but the CPUs will be resumed shortly later */
Peter Xu3ab72382018-08-15 21:37:37 +08002123 qapi_event_send_resume();
Markus Armbrusterf0561582018-04-23 10:45:18 +02002124
2125 replay_enable_events();
2126 cpu_enable_ticks();
2127 runstate_set(RUN_STATE_RUNNING);
2128 vm_state_notify(1, RUN_STATE_RUNNING);
2129 return 0;
Claudio Imbrenda2d76e822017-02-14 18:07:47 +01002130}
2131
2132void vm_start(void)
2133{
2134 if (!vm_prepare_start()) {
2135 resume_all_vcpus();
2136 }
2137}
2138
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03002139/* Does a state transition even if the VM is already stopped;
2140   the current state is forgotten forever. */
Kevin Wolf56983462013-07-05 13:49:54 +02002141int vm_stop_force_state(RunState state)
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03002142{
2143 if (runstate_is_running()) {
Kevin Wolf56983462013-07-05 13:49:54 +02002144 return vm_stop(state);
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03002145 } else {
2146 runstate_set(state);
Wen Congyangb2780d32015-11-20 17:34:38 +08002147
2148 bdrv_drain_all();
Kevin Wolf594a45c2013-07-18 14:52:19 +02002149 /* Make sure to return an error if the flush in a previous vm_stop()
2150 * failed. */
John Snow22af08e2016-09-22 21:45:51 -04002151 return bdrv_flush_all();
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03002152 }
2153}
2154
Stefan Weil9a78eea2010-10-22 23:03:33 +02002155void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
Blue Swirl262353c2010-05-04 19:55:35 +00002156{
2157    /* XXX: implement xxx_cpu_list for targets that still lack it */
Peter Maydelle916cbf2012-09-05 17:41:08 -03002158#if defined(cpu_list)
2159 cpu_list(f, cpu_fprintf);
Blue Swirl262353c2010-05-04 19:55:35 +00002160#endif
2161}
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002162
2163CpuInfoList *qmp_query_cpus(Error **errp)
2164{
Igor Mammedovafed5a52017-05-10 13:29:55 +02002165 MachineState *ms = MACHINE(qdev_get_machine());
2166 MachineClass *mc = MACHINE_GET_CLASS(ms);
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002167 CpuInfoList *head = NULL, *cur_item = NULL;
Andreas Färber182735e2013-05-29 22:29:20 +02002168 CPUState *cpu;
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002169
Andreas Färberbdc44642013-06-24 23:50:24 +02002170 CPU_FOREACH(cpu) {
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002171 CpuInfoList *info;
Andreas Färber182735e2013-05-29 22:29:20 +02002172#if defined(TARGET_I386)
2173 X86CPU *x86_cpu = X86_CPU(cpu);
2174 CPUX86State *env = &x86_cpu->env;
2175#elif defined(TARGET_PPC)
2176 PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
2177 CPUPPCState *env = &ppc_cpu->env;
2178#elif defined(TARGET_SPARC)
2179 SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
2180 CPUSPARCState *env = &sparc_cpu->env;
Michael Clark25fa1942018-03-03 01:32:59 +13002181#elif defined(TARGET_RISCV)
2182 RISCVCPU *riscv_cpu = RISCV_CPU(cpu);
2183 CPURISCVState *env = &riscv_cpu->env;
Andreas Färber182735e2013-05-29 22:29:20 +02002184#elif defined(TARGET_MIPS)
2185 MIPSCPU *mips_cpu = MIPS_CPU(cpu);
2186 CPUMIPSState *env = &mips_cpu->env;
Bastian Koppelmann48e06fe2014-09-01 12:59:46 +01002187#elif defined(TARGET_TRICORE)
2188 TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
2189 CPUTriCoreState *env = &tricore_cpu->env;
Viktor Mihajlovski9d0306d2018-02-16 17:08:37 +01002190#elif defined(TARGET_S390X)
2191 S390CPU *s390_cpu = S390_CPU(cpu);
2192 CPUS390XState *env = &s390_cpu->env;
Andreas Färber182735e2013-05-29 22:29:20 +02002193#endif
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002194
Andreas Färbercb446ec2013-05-01 14:24:52 +02002195 cpu_synchronize_state(cpu);
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002196
2197 info = g_malloc0(sizeof(*info));
2198 info->value = g_malloc0(sizeof(*info->value));
Andreas Färber55e5c282012-12-17 06:18:02 +01002199 info->value->CPU = cpu->cpu_index;
Andreas Färber182735e2013-05-29 22:29:20 +02002200 info->value->current = (cpu == first_cpu);
Andreas Färber259186a2013-01-17 18:51:17 +01002201 info->value->halted = cpu->halted;
Eduardo Habkost58f88d42015-05-08 16:04:22 -03002202 info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
Andreas Färber9f09e182012-05-03 06:59:07 +02002203 info->value->thread_id = cpu->thread_id;
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002204#if defined(TARGET_I386)
Eric Blake86f4b682015-11-18 01:52:59 -07002205 info->value->arch = CPU_INFO_ARCH_X86;
Eric Blake544a3732016-02-17 23:48:27 -07002206 info->value->u.x86.pc = env->eip + env->segs[R_CS].base;
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002207#elif defined(TARGET_PPC)
Eric Blake86f4b682015-11-18 01:52:59 -07002208 info->value->arch = CPU_INFO_ARCH_PPC;
Eric Blake544a3732016-02-17 23:48:27 -07002209 info->value->u.ppc.nip = env->nip;
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002210#elif defined(TARGET_SPARC)
Eric Blake86f4b682015-11-18 01:52:59 -07002211 info->value->arch = CPU_INFO_ARCH_SPARC;
Eric Blake544a3732016-02-17 23:48:27 -07002212 info->value->u.q_sparc.pc = env->pc;
2213 info->value->u.q_sparc.npc = env->npc;
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002214#elif defined(TARGET_MIPS)
Eric Blake86f4b682015-11-18 01:52:59 -07002215 info->value->arch = CPU_INFO_ARCH_MIPS;
Eric Blake544a3732016-02-17 23:48:27 -07002216 info->value->u.q_mips.PC = env->active_tc.PC;
Bastian Koppelmann48e06fe2014-09-01 12:59:46 +01002217#elif defined(TARGET_TRICORE)
Eric Blake86f4b682015-11-18 01:52:59 -07002218 info->value->arch = CPU_INFO_ARCH_TRICORE;
Eric Blake544a3732016-02-17 23:48:27 -07002219 info->value->u.tricore.PC = env->PC;
Viktor Mihajlovski9d0306d2018-02-16 17:08:37 +01002220#elif defined(TARGET_S390X)
2221 info->value->arch = CPU_INFO_ARCH_S390;
2222 info->value->u.s390.cpu_state = env->cpu_state;
Michael Clark25fa1942018-03-03 01:32:59 +13002223#elif defined(TARGET_RISCV)
2224 info->value->arch = CPU_INFO_ARCH_RISCV;
2225 info->value->u.riscv.pc = env->pc;
Eric Blake86f4b682015-11-18 01:52:59 -07002226#else
2227 info->value->arch = CPU_INFO_ARCH_OTHER;
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002228#endif
Igor Mammedovafed5a52017-05-10 13:29:55 +02002229 info->value->has_props = !!mc->cpu_index_to_instance_props;
2230 if (info->value->has_props) {
2231 CpuInstanceProperties *props;
2232 props = g_malloc0(sizeof(*props));
2233 *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
2234 info->value->props = props;
2235 }
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002236
2237 /* XXX: waiting for the qapi to support GSList */
2238 if (!cur_item) {
2239 head = cur_item = info;
2240 } else {
2241 cur_item->next = info;
2242 cur_item = info;
2243 }
2244 }
2245
2246 return head;
2247}
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002248
Laszlo Ersekdaa9d2b2018-04-27 21:28:51 +02002249static CpuInfoArch sysemu_target_to_cpuinfo_arch(SysEmuTarget target)
2250{
2251 /*
2252 * The @SysEmuTarget -> @CpuInfoArch mapping below is based on the
2253 * TARGET_ARCH -> TARGET_BASE_ARCH mapping in the "configure" script.
2254 */
2255 switch (target) {
2256 case SYS_EMU_TARGET_I386:
2257 case SYS_EMU_TARGET_X86_64:
2258 return CPU_INFO_ARCH_X86;
2259
2260 case SYS_EMU_TARGET_PPC:
Laszlo Ersekdaa9d2b2018-04-27 21:28:51 +02002261 case SYS_EMU_TARGET_PPC64:
2262 return CPU_INFO_ARCH_PPC;
2263
2264 case SYS_EMU_TARGET_SPARC:
2265 case SYS_EMU_TARGET_SPARC64:
2266 return CPU_INFO_ARCH_SPARC;
2267
2268 case SYS_EMU_TARGET_MIPS:
2269 case SYS_EMU_TARGET_MIPSEL:
2270 case SYS_EMU_TARGET_MIPS64:
2271 case SYS_EMU_TARGET_MIPS64EL:
2272 return CPU_INFO_ARCH_MIPS;
2273
2274 case SYS_EMU_TARGET_TRICORE:
2275 return CPU_INFO_ARCH_TRICORE;
2276
2277 case SYS_EMU_TARGET_S390X:
2278 return CPU_INFO_ARCH_S390;
2279
2280 case SYS_EMU_TARGET_RISCV32:
2281 case SYS_EMU_TARGET_RISCV64:
2282 return CPU_INFO_ARCH_RISCV;
2283
2284 default:
2285 return CPU_INFO_ARCH_OTHER;
2286 }
2287}
2288
2289static void cpustate_to_cpuinfo_s390(CpuInfoS390 *info, const CPUState *cpu)
2290{
2291#ifdef TARGET_S390X
2292 S390CPU *s390_cpu = S390_CPU(cpu);
2293 CPUS390XState *env = &s390_cpu->env;
2294
2295 info->cpu_state = env->cpu_state;
2296#else
2297 abort();
2298#endif
2299}
2300
Luiz Capitulinoce74ee32018-02-16 17:08:38 +01002301/*
2302 * fast means: we NEVER interrupt vCPU threads to retrieve
2303 * information from KVM.
2304 */
2305CpuInfoFastList *qmp_query_cpus_fast(Error **errp)
2306{
2307 MachineState *ms = MACHINE(qdev_get_machine());
2308 MachineClass *mc = MACHINE_GET_CLASS(ms);
2309 CpuInfoFastList *head = NULL, *cur_item = NULL;
Laszlo Ersekdaa9d2b2018-04-27 21:28:51 +02002310 SysEmuTarget target = qapi_enum_parse(&SysEmuTarget_lookup, TARGET_NAME,
2311 -1, &error_abort);
Luiz Capitulinoce74ee32018-02-16 17:08:38 +01002312 CPUState *cpu;
2313
2314 CPU_FOREACH(cpu) {
2315 CpuInfoFastList *info = g_malloc0(sizeof(*info));
2316 info->value = g_malloc0(sizeof(*info->value));
2317
2318 info->value->cpu_index = cpu->cpu_index;
2319 info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
2320 info->value->thread_id = cpu->thread_id;
2321
2322 info->value->has_props = !!mc->cpu_index_to_instance_props;
2323 if (info->value->has_props) {
2324 CpuInstanceProperties *props;
2325 props = g_malloc0(sizeof(*props));
2326 *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
2327 info->value->props = props;
2328 }
2329
Laszlo Ersekdaa9d2b2018-04-27 21:28:51 +02002330 info->value->arch = sysemu_target_to_cpuinfo_arch(target);
2331 info->value->target = target;
2332 if (target == SYS_EMU_TARGET_S390X) {
2333 cpustate_to_cpuinfo_s390(&info->value->u.s390x, cpu);
Laszlo Ersekdaa9d2b2018-04-27 21:28:51 +02002334 }
2335
Luiz Capitulinoce74ee32018-02-16 17:08:38 +01002336 if (!cur_item) {
2337 head = cur_item = info;
2338 } else {
2339 cur_item->next = info;
2340 cur_item = info;
2341 }
2342 }
2343
2344 return head;
2345}
2346
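/* Save @size bytes of guest memory, translated through the virtual address
 * space of the selected vCPU, into @filename; qmp_pmemsave() below is the
 * guest-physical counterpart.
 */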
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002347void qmp_memsave(int64_t addr, int64_t size, const char *filename,
2348 bool has_cpu, int64_t cpu_index, Error **errp)
2349{
2350 FILE *f;
2351 uint32_t l;
Andreas Färber55e5c282012-12-17 06:18:02 +01002352 CPUState *cpu;
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002353 uint8_t buf[1024];
Borislav Petkov0dc9daf2015-02-08 13:14:38 +01002354 int64_t orig_addr = addr, orig_size = size;
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002355
2356 if (!has_cpu) {
2357 cpu_index = 0;
2358 }
2359
Andreas Färber151d1322013-02-15 15:41:49 +01002360 cpu = qemu_get_cpu(cpu_index);
2361 if (cpu == NULL) {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +01002362 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
2363 "a CPU number");
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002364 return;
2365 }
2366
2367 f = fopen(filename, "wb");
2368 if (!f) {
Luiz Capitulino618da852013-06-07 14:35:06 -04002369 error_setg_file_open(errp, errno, filename);
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002370 return;
2371 }
2372
2373 while (size != 0) {
2374 l = sizeof(buf);
2375        if (l > size) {
2376            l = size;
        }
Aneesh Kumar K.V2f4d0f52013-10-01 21:49:30 +05302377 if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
Borislav Petkov0dc9daf2015-02-08 13:14:38 +01002378 error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
2379 " specified", orig_addr, orig_size);
Aneesh Kumar K.V2f4d0f52013-10-01 21:49:30 +05302380 goto exit;
2381 }
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002382 if (fwrite(buf, 1, l, f) != l) {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +01002383 error_setg(errp, QERR_IO_ERROR);
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002384 goto exit;
2385 }
2386 addr += l;
2387 size -= l;
2388 }
2389
2390exit:
2391 fclose(f);
2392}
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02002393
2394void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
2395 Error **errp)
2396{
2397 FILE *f;
2398 uint32_t l;
2399 uint8_t buf[1024];
2400
2401 f = fopen(filename, "wb");
2402 if (!f) {
Luiz Capitulino618da852013-06-07 14:35:06 -04002403 error_setg_file_open(errp, errno, filename);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02002404 return;
2405 }
2406
2407 while (size != 0) {
2408 l = sizeof(buf);
2409        if (l > size) {
2410            l = size;
        }
Stefan Weileb6282f2014-04-07 20:28:23 +02002411 cpu_physical_memory_read(addr, buf, l);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02002412 if (fwrite(buf, 1, l, f) != l) {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +01002413 error_setg(errp, QERR_IO_ERROR);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02002414 goto exit;
2415 }
2416 addr += l;
2417 size -= l;
2418 }
2419
2420exit:
2421 fclose(f);
2422}
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02002423
2424void qmp_inject_nmi(Error **errp)
2425{
Alexey Kardashevskiy9cb805f2014-08-20 22:16:33 +10002426 nmi_monitor_handle(monitor_get_cpu_index(), errp);
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02002427}
Sebastian Tanase27498be2014-07-25 11:56:33 +02002428
2429void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
2430{
2431 if (!use_icount) {
2432 return;
2433 }
2434
2435 cpu_fprintf(f, "Host - Guest clock %"PRIi64" ms\n",
2436 (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
2437 if (icount_align_option) {
2438 cpu_fprintf(f, "Max guest delay %"PRIi64" ms\n", -max_delay/SCALE_MS);
2439 cpu_fprintf(f, "Max guest advance %"PRIi64" ms\n", max_advance/SCALE_MS);
2440 } else {
2441 cpu_fprintf(f, "Max guest delay NA\n");
2442 cpu_fprintf(f, "Max guest advance NA\n");
2443 }
2444}