/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/config-file.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
#include "sysemu/block-backend.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm.h"
#include "sysemu/hax.h"
#include "sysemu/hvf.h"
#include "sysemu/whpx.h"
#include "exec/exec-all.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
#include "qemu/guest-random.h"
#include "tcg.h"
#include "hw/nmi.h"
#include "sysemu/replay.h"
#include "hw/boards.h"

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

int64_t max_delay;
int64_t max_advance;

/* vcpu throttling controls */
static QEMUTimer *throttle_timer;
static unsigned int throttle_percentage;

#define CPU_THROTTLE_PCT_MIN 1
#define CPU_THROTTLE_PCT_MAX 99
#define CPU_THROTTLE_TIMESLICE_NS 10000000

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

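/* A vCPU thread may sleep only when it has no stop request and no
 * queued work pending; a stopped vCPU is trivially idle, and a running
 * one is idle only if it has halted with no work (with in-kernel halt,
 * KVM owns the halted state, so the thread never takes the idle path
 * here).
 */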
static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

static bool icount_sleep = true;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10

typedef struct TimersState {
    /* Protected by BQL. */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* Protect fields that can be respectively read outside the
     * BQL, and written from multiple threads.
     */
    QemuSeqLock vm_clock_seqlock;
    QemuSpin vm_clock_lock;

    int16_t cpu_ticks_enabled;

    /* Conversion factor from emulated instructions to virtual clock ticks. */
    int16_t icount_time_shift;

    /* Compensate for varying guest execution speed. */
    int64_t qemu_icount_bias;

    int64_t vm_clock_warp_start;
    int64_t cpu_clock_offset;

    /* Only written by TCG thread */
    int64_t qemu_icount;

    /* for adjusting icount */
    QEMUTimer *icount_rt_timer;
    QEMUTimer *icount_vm_timer;
    QEMUTimer *icount_warp_timer;
} TimersState;

static TimersState timers_state;
bool mttcg_enabled;

/*
 * We default to false if we know other options have been enabled
 * which are currently incompatible with MTTCG. Otherwise when each
 * guest (target) has been updated to support:
 *  - atomic instructions
 *  - memory ordering primitives (barriers)
 * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
 *
 * Once a guest architecture has been converted to the new primitives
 * there are two remaining limitations to check.
 *
 * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
 * - The host must have a stronger memory order than the guest
 *
 * It may be possible in future to support strong guests on weak hosts
 * but that will require tagging all load/stores in a guest with their
 * implicit memory order requirements which would likely slow things
 * down a lot.
 */

static bool check_tcg_memory_orders_compatible(void)
{
#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
    return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
#else
    return false;
#endif
}

static bool default_mttcg_enabled(void)
{
    if (use_icount || TCG_OVERSIZED_GUEST) {
        return false;
    } else {
#ifdef TARGET_SUPPORTS_MTTCG
        return check_tcg_memory_orders_compatible();
#else
        return false;
#endif
    }
}

void qemu_tcg_configure(QemuOpts *opts, Error **errp)
{
    const char *t = qemu_opt_get(opts, "thread");
    if (t) {
        if (strcmp(t, "multi") == 0) {
            if (TCG_OVERSIZED_GUEST) {
                error_setg(errp, "No MTTCG when guest word size > hosts");
            } else if (use_icount) {
                error_setg(errp, "No MTTCG when icount is enabled");
            } else {
#ifndef TARGET_SUPPORTS_MTTCG
                warn_report("Guest not yet converted to MTTCG - "
                            "you may get unexpected results");
#endif
                if (!check_tcg_memory_orders_compatible()) {
                    warn_report("Guest expects a stronger memory ordering "
                                "than the host provides");
                    error_printf("This may cause strange/hard to debug errors\n");
                }
                mttcg_enabled = true;
            }
        } else if (strcmp(t, "single") == 0) {
            mttcg_enabled = false;
        } else {
            error_setg(errp, "Invalid 'thread' setting %s", t);
        }
    } else {
        mttcg_enabled = default_mttcg_enabled();
    }
}

/* The current number of executed instructions is based on what we
 * originally budgeted minus the current state of the decrementing
 * icount counters in extra/u16.low.
 */
static int64_t cpu_get_icount_executed(CPUState *cpu)
{
    return (cpu->icount_budget -
            (cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra));
}

/*
 * Update the global shared timer_state.qemu_icount to take into
 * account executed instructions. This is done by the TCG vCPU
 * thread so the main-loop can see time has moved forward.
 */
static void cpu_update_icount_locked(CPUState *cpu)
{
    int64_t executed = cpu_get_icount_executed(cpu);
    cpu->icount_budget -= executed;

    atomic_set_i64(&timers_state.qemu_icount,
                   timers_state.qemu_icount + executed);
}

/*
 * Update the global shared timer_state.qemu_icount to take into
 * account executed instructions. This is done by the TCG vCPU
 * thread so the main-loop can see time has moved forward.
 */
void cpu_update_icount(CPUState *cpu)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    cpu_update_icount_locked(cpu);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

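/* Return the current instruction count; caller must hold the seqlock.
 * If called from a vCPU thread that is mid-execution, fold the
 * instructions it has already executed into qemu_icount first so the
 * result is up to date.
 */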
static int64_t cpu_get_icount_raw_locked(void)
{
    CPUState *cpu = current_cpu;

    if (cpu && cpu->running) {
        if (!cpu->can_do_io) {
            error_report("Bad icount read");
            exit(1);
        }
        /* Take into account what has run */
        cpu_update_icount_locked(cpu);
    }
    /* The read is protected by the seqlock, but needs atomic64 to avoid UB */
    return atomic_read_i64(&timers_state.qemu_icount);
}

static int64_t cpu_get_icount_locked(void)
{
    int64_t icount = cpu_get_icount_raw_locked();
    return atomic_read_i64(&timers_state.qemu_icount_bias) +
           cpu_icount_to_ns(icount);
}

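/* Lock-free reader of the raw instruction count: retry the read
 * whenever the seqlock indicates a concurrent writer touched the
 * timer state.
 */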
int64_t cpu_get_icount_raw(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_raw_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}

/* Return the virtual CPU time, based on the instruction counter. */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}

int64_t cpu_icount_to_ns(int64_t icount)
{
    return icount << atomic_read(&timers_state.icount_time_shift);
}

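/* Host-tick based counter for the !icount case: it advances only while
 * ticks are enabled, and cpu_ticks_prev compensates for a host tick
 * counter that goes backwards (e.g. across a software suspend).
 */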
static int64_t cpu_get_ticks_locked(void)
{
    int64_t ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_host_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Non-increasing ticks may happen if the host uses software suspend. */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}

/* return the time elapsed in VM between vm_start and vm_stop.  Unless
 * icount is active, cpu_get_ticks() uses units of the host CPU cycle
 * counter.
 */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    qemu_spin_lock(&timers_state.vm_clock_lock);
    ticks = cpu_get_ticks_locked();
    qemu_spin_unlock(&timers_state.vm_clock_lock);
    return ticks;
}

static int64_t cpu_get_clock_locked(void)
{
    int64_t time;

    time = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        time += get_clock();
    }

    return time;
}

/* Return the monotonic time elapsed in VM, i.e.,
 * the time between vm_start and vm_stop
 */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_host_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)

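/* Adaptive-shift feedback loop (use_icount == 2): compare virtual time
 * (instructions << shift) against the VM's real-time clock and nudge
 * icount_time_shift up or down by one, then recompute qemu_icount_bias
 * so the visible QEMU_CLOCK_VIRTUAL value does not jump when the shift
 * changes.
 */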
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex. */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing. */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && timers_state.icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down. */
        atomic_set(&timers_state.icount_time_shift,
                   timers_state.icount_time_shift - 1);
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && timers_state.icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up. */
        atomic_set(&timers_state.icount_time_shift,
                   timers_state.icount_time_shift + 1);
    }
    last_delta = delta;
    atomic_set_i64(&timers_state.qemu_icount_bias,
                   cur_icount - (timers_state.qemu_icount
                                 << timers_state.icount_time_shift));
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

static void icount_adjust_rt(void *opaque)
{
    timer_mod(timers_state.icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(timers_state.icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND / 10);
    icount_adjust();
}

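/* Convert a QEMU_CLOCK_VIRTUAL interval into a whole number of
 * instructions, rounding up: e.g. with icount_time_shift == 3 each
 * instruction is worth 8 ns, so a 20 ns deadline budgets 3 insns.
 */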
static int64_t qemu_icount_round(int64_t count)
{
    int shift = atomic_read(&timers_state.icount_time_shift);
    return (count + (1 << shift) - 1) >> shift;
}

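/* Apply a pending warp: advance qemu_icount_bias by the real time that
 * elapsed since vm_clock_warp_start, so QEMU_CLOCK_VIRTUAL catches up
 * after an idle period.  In adaptive mode the jump is capped so that
 * virtual time does not overtake QEMU_CLOCK_VIRTUAL_RT.
 */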
static void icount_warp_rt(void)
{
    unsigned seq;
    int64_t warp_start;

    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    do {
        seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        warp_start = timers_state.vm_clock_warp_start;
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));

    if (warp_start == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (runstate_is_running()) {
        int64_t clock = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT,
                                            cpu_get_clock_locked());
        int64_t warp_delta;

        warp_delta = clock - timers_state.vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = clock - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        atomic_set_i64(&timers_state.qemu_icount_bias,
                       timers_state.qemu_icount_bias + warp_delta);
    }
    timers_state.vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static void icount_timer_cb(void *opaque)
{
    /* No need for a checkpoint because the timer already synchronizes
     * with CHECKPOINT_CLOCK_VIRTUAL_RT.
     */
    icount_warp_rt();
}

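/* Used by qtest's clock_step commands: instead of executing guest
 * instructions, warp QEMU_CLOCK_VIRTUAL straight to each intervening
 * timer deadline (at most to 'dest') and run the timers that expire.
 */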
void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    AioContext *aio_context;
    assert(qtest_enabled());
    aio_context = qemu_get_aio_context();
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);

        seqlock_write_lock(&timers_state.vm_clock_seqlock,
                           &timers_state.vm_clock_lock);
        atomic_set_i64(&timers_state.qemu_icount_bias,
                       timers_state.qemu_icount_bias + warp);
        seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                             &timers_state.vm_clock_lock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}

void qemu_start_warp_timer(void)
{
    int64_t clock;
    int64_t deadline;

    if (!use_icount) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    if (replay_mode != REPLAY_MODE_PLAY) {
        if (!all_cpu_threads_idle()) {
            return;
        }

        if (qtest_enabled()) {
            /* When testing, qtest commands advance icount. */
            return;
        }

        replay_checkpoint(CHECKPOINT_CLOCK_WARP_START);
    } else {
        /* warp clock deterministically in record/replay mode */
        if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
            /* vCPU is sleeping and warp can't be started.
               It is probably a race condition: notification sent
               to vCPU was processed in advance and vCPU went to sleep.
               Therefore we have to wake it up so it can do something. */
            if (replay_has_checkpoint()) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
            return;
        }
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        static bool notified;
        if (!icount_sleep && !notified) {
            warn_report("icount sleep disabled and no active timers");
            notified = true;
        }
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         */
        if (!icount_sleep) {
            /*
             * We never let VCPUs sleep in no sleep icount mode.
             * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
             * to the next QEMU_CLOCK_VIRTUAL event and notify it.
             * It is useful when we want a deterministic execution time,
             * isolated from host latencies.
             */
            seqlock_write_lock(&timers_state.vm_clock_seqlock,
                               &timers_state.vm_clock_lock);
            atomic_set_i64(&timers_state.qemu_icount_bias,
                           timers_state.qemu_icount_bias + deadline);
            seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                                 &timers_state.vm_clock_lock);
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
        } else {
            /*
             * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
             * "real" time, (related to the time left until the next event) has
             * passed.  The QEMU_CLOCK_VIRTUAL_RT clock will do this.
             * This keeps the warps from being visible externally; for example,
             * you will not be sending network packets continuously instead of
             * every 100ms.
             */
            seqlock_write_lock(&timers_state.vm_clock_seqlock,
                               &timers_state.vm_clock_lock);
            if (timers_state.vm_clock_warp_start == -1
                || timers_state.vm_clock_warp_start > clock) {
                timers_state.vm_clock_warp_start = clock;
            }
            seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                                 &timers_state.vm_clock_lock);
            timer_mod_anticipate(timers_state.icount_warp_timer,
                                 clock + deadline);
        }
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static void qemu_account_warp_timer(void)
{
    if (!use_icount || !icount_sleep) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    /* warp clock deterministically in record/replay mode */
    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
        return;
    }

    timer_del(timers_state.icount_warp_timer);
    icount_warp_rt();
}

static bool icount_state_needed(void *opaque)
{
    return use_icount;
}

static bool warp_timer_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_warp_timer != NULL;
}

static bool adjust_timers_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_rt_timer != NULL;
}

/*
 * Subsection for warp timer migration is optional, because the timer
 * may not be created.
 */
static const VMStateDescription icount_vmstate_warp_timer = {
    .name = "timer/icount/warp_timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = warp_timer_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(vm_clock_warp_start, TimersState),
        VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription icount_vmstate_adjust_timers = {
    .name = "timer/icount/timers",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = adjust_timers_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
        VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icount_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_warp_timer,
        &icount_vmstate_adjust_timers,
        NULL
    }
};

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_UNUSED(8),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_timers,
        NULL
    }
};

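/* Queued on a throttled vCPU via async_run_on_cpu.  The sleep is sized
 * so the vCPU sleeps pct / (1 - pct) timeslices for every timeslice of
 * work: e.g. at 75% throttle the ratio is 3, i.e. 30 ms asleep per
 * 10 ms of execution, leaving the vCPU roughly 25% of wall-clock time.
 */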
static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
{
    double pct;
    double throttle_ratio;
    long sleeptime_ns;

    if (!cpu_throttle_get_percentage()) {
        return;
    }

    pct = (double)cpu_throttle_get_percentage()/100;
    throttle_ratio = pct / (1 - pct);
    sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS);

    qemu_mutex_unlock_iothread();
    g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */
    qemu_mutex_lock_iothread();
    atomic_set(&cpu->throttle_thread_scheduled, 0);
}

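/* Periodic QEMU_CLOCK_VIRTUAL_RT callback: schedule cpu_throttle_thread
 * on every vCPU that does not already have one pending, then re-arm
 * with the period stretched to TIMESLICE / (1 - pct) so that sleep time
 * plus run time add up to one full period.
 */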
static void cpu_throttle_timer_tick(void *opaque)
{
    CPUState *cpu;
    double pct;

    /* Stop the timer if needed */
    if (!cpu_throttle_get_percentage()) {
        return;
    }
    CPU_FOREACH(cpu) {
        if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
            async_run_on_cpu(cpu, cpu_throttle_thread,
                             RUN_ON_CPU_NULL);
        }
    }

    pct = (double)cpu_throttle_get_percentage()/100;
    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                                   CPU_THROTTLE_TIMESLICE_NS / (1-pct));
}

void cpu_throttle_set(int new_throttle_pct)
{
    /* Ensure throttle percentage is within valid range */
    new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
    new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);

    atomic_set(&throttle_percentage, new_throttle_pct);

    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                                       CPU_THROTTLE_TIMESLICE_NS);
}

void cpu_throttle_stop(void)
{
    atomic_set(&throttle_percentage, 0);
}

bool cpu_throttle_active(void)
{
    return (cpu_throttle_get_percentage() != 0);
}

int cpu_throttle_get_percentage(void)
{
    return atomic_read(&throttle_percentage);
}

void cpu_ticks_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock);
    qemu_spin_init(&timers_state.vm_clock_lock);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                  cpu_throttle_timer_tick, NULL);
}

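/* Parse the -icount option.  "shift=N" fixes the conversion at 2^N ns
 * per instruction (use_icount == 1); "shift=auto" starts from the
 * 125 MIPS guess below and lets the two adjustment timers retune the
 * shift at runtime (use_icount == 2).
 */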
void configure_icount(QemuOpts *opts, Error **errp)
{
    const char *option;
    char *rem_str = NULL;

    option = qemu_opt_get(opts, "shift");
    if (!option) {
        if (qemu_opt_get(opts, "align") != NULL) {
            error_setg(errp, "Please specify shift option when using align");
        }
        return;
    }

    icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
    if (icount_sleep) {
        timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                                      icount_timer_cb, NULL);
    }

    icount_align_option = qemu_opt_get_bool(opts, "align", false);

    if (icount_align_option && !icount_sleep) {
        error_setg(errp, "align=on and sleep=off are incompatible");
    }
    if (strcmp(option, "auto") != 0) {
        errno = 0;
        timers_state.icount_time_shift = strtol(option, &rem_str, 0);
        if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
            error_setg(errp, "icount: Invalid shift value");
        }
        use_icount = 1;
        return;
    } else if (icount_align_option) {
        error_setg(errp, "shift=auto and align=on are incompatible");
    } else if (!icount_sleep) {
        error_setg(errp, "shift=auto and sleep=off are incompatible");
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway. */
    timers_state.icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers. */
    timers_state.vm_clock_warp_start = -1;
    timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                                                icount_adjust_rt, NULL);
    timer_mod(timers_state.icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                                icount_adjust_vm, NULL);
    timer_mod(timers_state.icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND / 10);
}

/***********************************************************/
/* TCG vCPU kick timer
 *
 * The kick timer is responsible for moving single threaded vCPU
 * emulation on to the next vCPU.  If more than one vCPU is running, a
 * timer event will force a cpu->exit so the next vCPU can get
 * scheduled.
 *
 * The timer is removed if all vCPUs are idle and restarted again once
 * idleness is complete.
 */

static QEMUTimer *tcg_kick_vcpu_timer;
static CPUState *tcg_current_rr_cpu;

#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)

static inline int64_t qemu_tcg_next_kick(void)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
}

/* Kick the currently round-robin scheduled vCPU */
static void qemu_cpu_kick_rr_cpu(void)
{
    CPUState *cpu;
    do {
        cpu = atomic_mb_read(&tcg_current_rr_cpu);
        if (cpu) {
            cpu_exit(cpu);
        }
    } while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
}

static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
{
}

void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
{
    if (!use_icount || type != QEMU_CLOCK_VIRTUAL) {
        qemu_notify_event();
        return;
    }

    if (qemu_in_vcpu_thread()) {
        /* A CPU is currently running; kick it back out to the
         * tcg_cpu_exec() loop so it will recalculate its
         * icount deadline immediately.
         */
        qemu_cpu_kick(current_cpu);
    } else if (first_cpu) {
        /* qemu_cpu_kick is not enough to kick a halted CPU out of
         * qemu_tcg_wait_io_event.  async_run_on_cpu, instead,
         * causes cpu_thread_is_idle to return false.  This way,
         * handle_icount_deadline can run.
         * If we have no CPUs at all for some reason, we don't
         * need to do anything.
         */
        async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
    }
}

static void kick_tcg_thread(void *opaque)
{
    timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    qemu_cpu_kick_rr_cpu();
}

static void start_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);
    if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
        tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                           kick_tcg_thread, NULL);
    }
    if (tcg_kick_vcpu_timer && !timer_pending(tcg_kick_vcpu_timer)) {
        timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    }
}

static void stop_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);
    if (tcg_kick_vcpu_timer && timer_pending(tcg_kick_vcpu_timer)) {
        timer_del(tcg_kick_vcpu_timer);
    }
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
        /* TODO: move to cpu_synchronize_state() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_state(cpu);
        }
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
        /* TODO: move to cpu_synchronize_post_reset() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_post_reset(cpu);
        }
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
        /* TODO: move to cpu_synchronize_post_init() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_post_init(cpu);
        }
    }
}

void cpu_synchronize_all_pre_loadvm(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_pre_loadvm(cpu);
    }
}

static int do_vm_stop(RunState state, bool send_stop)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        if (send_stop) {
            qapi_event_send_stop();
        }
    }

    bdrv_drain_all();
    replay_disable_events();
    ret = bdrv_flush_all();

    return ret;
}

/* Special vm_stop() variant for terminating the process.  Historically clients
 * did not expect a QMP STOP event and so we need to retain compatibility.
 */
int vm_shutdown(void)
{
    return do_vm_stop(RUN_STATE_SHUTDOWN, false);
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

#ifdef CONFIG_LINUX
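/* Restore the default SIGBUS disposition and re-deliver the signal so
 * that the process terminates with SIGBUS as if it had never been
 * handled.
 */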
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx)
{
    if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) {
        sigbus_reraise();
    }

    if (current_cpu) {
        /* Called asynchronously in VCPU thread. */
        if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    } else {
        /* Called synchronously (via signalfd) in main thread. */
        if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
#else /* !CONFIG_LINUX */
static void qemu_init_sigbus(void)
{
}
#endif /* !CONFIG_LINUX */

static QemuMutex qemu_global_mutex;

static QemuThread io_thread;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

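/* Run func on the given vCPU and wait for it to finish, dropping the
 * BQL (qemu_global_mutex) while the request is being processed.
 */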
void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
}

static void qemu_kvm_destroy_vcpu(CPUState *cpu)
{
    if (kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}

static void qemu_tcg_destroy_vcpu(CPUState *cpu)
{
}

static void qemu_cpu_stop(CPUState *cpu, bool exit)
{
    g_assert(qemu_cpu_is_self(cpu));
    cpu->stop = false;
    cpu->stopped = true;
    if (exit) {
        cpu_exit(cpu);
    }
    qemu_cond_broadcast(&qemu_pause_cond);
}

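/* Housekeeping shared by every wait-for-event path: acknowledge any
 * kick, honour a pending stop request and drain the vCPU's queued
 * work items.
 */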
Andreas Färber509a0d72012-05-03 02:18:09 +02001218static void qemu_wait_io_event_common(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001219{
Alex Bennée37257942017-02-23 18:29:14 +00001220 atomic_mb_set(&cpu->thread_kicked, false);
Andreas Färber4fdeee72012-05-02 23:10:09 +02001221 if (cpu->stop) {
David Hildenbrandebd05fe2017-11-29 20:12:15 +01001222 qemu_cpu_stop(cpu, false);
Blue Swirl296af7c2010-03-29 19:23:50 +00001223 }
Sergey Fedorova5403c62016-08-02 18:27:36 +01001224 process_queued_cpu_work(cpu);
Alex Bennée37257942017-02-23 18:29:14 +00001225}
1226
Paolo Bonzinia8efa602018-11-14 12:36:57 +01001227static void qemu_tcg_rr_wait_io_event(void)
Alex Bennée37257942017-02-23 18:29:14 +00001228{
Paolo Bonzinia8efa602018-11-14 12:36:57 +01001229 CPUState *cpu;
1230
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001231 while (all_cpu_threads_idle()) {
Alex Bennée65467062017-02-23 18:29:09 +00001232 stop_tcg_kick_timer();
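        /* In round-robin mode every vCPU runs on this one thread and all
         * of them share a single halt_cond; by convention we sleep on
         * first_cpu's.
         */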
Paolo Bonzinia8efa602018-11-14 12:36:57 +01001233 qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
Jan Kiszka16400322011-02-09 16:29:37 +01001234 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001235
Alex Bennée65467062017-02-23 18:29:09 +00001236 start_tcg_kick_timer();
1237
Paolo Bonzinia8efa602018-11-14 12:36:57 +01001238 CPU_FOREACH(cpu) {
1239 qemu_wait_io_event_common(cpu);
1240 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001241}
1242
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001243static void qemu_wait_io_event(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001244{
Andreas Färbera98ae1d2013-05-26 23:21:08 +02001245 while (cpu_thread_is_idle(cpu)) {
Andreas Färberf5c121b2012-05-03 01:22:49 +02001246 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
Jan Kiszka16400322011-02-09 16:29:37 +01001247 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001248
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001249#ifdef _WIN32
1250 /* Eat dummy APC queued by qemu_cpu_kick_thread. */
1251 if (!tcg_enabled()) {
1252 SleepEx(0, TRUE);
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05001253 }
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001254#endif
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05001255 qemu_wait_io_event_common(cpu);
1256}
1257
Jan Kiszka7e97cd82011-02-07 12:19:12 +01001258static void *qemu_kvm_cpu_thread_fn(void *arg)
Blue Swirl296af7c2010-03-29 19:23:50 +00001259{
Andreas Färber48a106b2013-05-27 02:20:39 +02001260 CPUState *cpu = arg;
Jan Kiszka84b49152011-02-01 22:15:50 +01001261 int r;
Blue Swirl296af7c2010-03-29 19:23:50 +00001262
Paolo Bonziniab28bd22015-07-09 08:55:38 +02001263 rcu_register_thread();
1264
Paolo Bonzini2e7f7a32015-06-18 18:47:18 +02001265 qemu_mutex_lock_iothread();
Andreas Färber814e6122012-05-02 17:00:37 +02001266 qemu_thread_get_self(cpu->thread);
Andreas Färber9f09e182012-05-03 06:59:07 +02001267 cpu->thread_id = qemu_get_thread_id();
Pavel Dovgalyuk626cf8f2014-12-08 10:53:17 +03001268 cpu->can_do_io = 1;
Andreas Färber4917cf42013-05-27 05:17:50 +02001269 current_cpu = cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001270
Andreas Färber504134d2012-12-17 06:38:45 +01001271 r = kvm_init_vcpu(cpu);
Jan Kiszka84b49152011-02-01 22:15:50 +01001272 if (r < 0) {
Alistair Francis493d89b2018-02-03 09:43:14 +01001273 error_report("kvm_init_vcpu failed: %s", strerror(-r));
Jan Kiszka84b49152011-02-01 22:15:50 +01001274 exit(1);
1275 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001276
Paolo Bonzini18268b62017-02-09 09:41:14 +01001277 kvm_init_cpu_signals(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001278
1279 /* signal CPU creation */
Andreas Färber61a46212012-05-02 22:49:36 +02001280 cpu->created = true;
Blue Swirl296af7c2010-03-29 19:23:50 +00001281 qemu_cond_signal(&qemu_cpu_cond);
Richard Henderson9c09a252019-03-14 13:06:29 -07001282 qemu_guest_random_seed_thread_part2(cpu->random_seed);
Blue Swirl296af7c2010-03-29 19:23:50 +00001283
Gu Zheng4c055ab2016-05-12 09:18:13 +05301284 do {
Andreas Färbera1fcaa72012-05-02 23:42:26 +02001285 if (cpu_can_run(cpu)) {
Andreas Färber1458c362013-05-26 23:46:55 +02001286 r = kvm_cpu_exec(cpu);
Jan Kiszka83f338f2011-02-07 12:19:17 +01001287 if (r == EXCP_DEBUG) {
Andreas Färber91325042013-05-27 02:07:49 +02001288 cpu_handle_guest_debug(cpu);
Jan Kiszka83f338f2011-02-07 12:19:17 +01001289 }
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001290 }
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001291 qemu_wait_io_event(cpu);
Gu Zheng4c055ab2016-05-12 09:18:13 +05301292 } while (!cpu->unplug || cpu_can_run(cpu));
Blue Swirl296af7c2010-03-29 19:23:50 +00001293
Gu Zheng4c055ab2016-05-12 09:18:13 +05301294 qemu_kvm_destroy_vcpu(cpu);
Bharata B Rao2c579042016-05-12 09:18:14 +05301295 cpu->created = false;
1296 qemu_cond_signal(&qemu_cpu_cond);
Gu Zheng4c055ab2016-05-12 09:18:13 +05301297 qemu_mutex_unlock_iothread();
Paolo Bonzini57615ed2018-01-30 11:04:36 -05001298 rcu_unregister_thread();
Blue Swirl296af7c2010-03-29 19:23:50 +00001299 return NULL;
1300}
1301
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001302static void *qemu_dummy_cpu_thread_fn(void *arg)
1303{
1304#ifdef _WIN32
Alistair Francis493d89b2018-02-03 09:43:14 +01001305 error_report("qtest is not supported under Windows");
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001306 exit(1);
1307#else
Andreas Färber10a90212013-05-27 02:24:35 +02001308 CPUState *cpu = arg;
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001309 sigset_t waitset;
1310 int r;
1311
Paolo Bonziniab28bd22015-07-09 08:55:38 +02001312 rcu_register_thread();
1313
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001314 qemu_mutex_lock_iothread();
Andreas Färber814e6122012-05-02 17:00:37 +02001315 qemu_thread_get_self(cpu->thread);
Andreas Färber9f09e182012-05-03 06:59:07 +02001316 cpu->thread_id = qemu_get_thread_id();
Pavel Dovgalyuk626cf8f2014-12-08 10:53:17 +03001317 cpu->can_do_io = 1;
Alex Bennée37257942017-02-23 18:29:14 +00001318 current_cpu = cpu;
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001319
1320 sigemptyset(&waitset);
1321 sigaddset(&waitset, SIG_IPI);
1322
1323 /* signal CPU creation */
Andreas Färber61a46212012-05-02 22:49:36 +02001324 cpu->created = true;
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001325 qemu_cond_signal(&qemu_cpu_cond);
Richard Henderson9c09a252019-03-14 13:06:29 -07001326 qemu_guest_random_seed_thread_part2(cpu->random_seed);
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001327
Paolo Bonzinid2831ab2018-01-30 11:04:53 -05001328 do {
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001329 qemu_mutex_unlock_iothread();
1330 do {
1331 int sig;
1332 r = sigwait(&waitset, &sig);
1333 } while (r == -1 && (errno == EAGAIN || errno == EINTR));
1334 if (r == -1) {
1335 perror("sigwait");
1336 exit(1);
1337 }
1338 qemu_mutex_lock_iothread();
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001339 qemu_wait_io_event(cpu);
Paolo Bonzinid2831ab2018-01-30 11:04:53 -05001340 } while (!cpu->unplug);
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001341
David Hildenbrandd40bfcb2019-02-18 10:21:57 +01001342 qemu_mutex_unlock_iothread();
Paolo Bonzinid2831ab2018-01-30 11:04:53 -05001343 rcu_unregister_thread();
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001344 return NULL;
1345#endif
1346}
1347
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001348static int64_t tcg_get_icount_limit(void)
1349{
1350 int64_t deadline;
1351
1352 if (replay_mode != REPLAY_MODE_PLAY) {
1353 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1354
1355 /* Maintain prior (possibly buggy) behaviour: if no deadline was
1356 * set (i.e. there is no QEMU_CLOCK_VIRTUAL timer), or it is more
1357 * than INT32_MAX nanoseconds ahead, cap it at INT32_MAX
1358 * nanoseconds.
1359 */
1360 if ((deadline < 0) || (deadline > INT32_MAX)) {
1361 deadline = INT32_MAX;
1362 }
1363
1364 return qemu_icount_round(deadline);
1365 } else {
1366 return replay_get_instructions();
1367 }
1368}
1369
Alex Bennée12e97002016-10-27 16:10:14 +01001370static void handle_icount_deadline(void)
1371{
Paolo Bonzini6b8f0182017-03-02 19:56:40 +01001372 assert(qemu_in_vcpu_thread());
Alex Bennée12e97002016-10-27 16:10:14 +01001373 if (use_icount) {
1374 int64_t deadline =
1375 qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1376
1377 if (deadline == 0) {
Paolo Bonzini6b8f0182017-03-02 19:56:40 +01001378 /* Wake up other AioContexts. */
Alex Bennée12e97002016-10-27 16:10:14 +01001379 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
Paolo Bonzini6b8f0182017-03-02 19:56:40 +01001380 qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
Alex Bennée12e97002016-10-27 16:10:14 +01001381 }
1382 }
1383}
1384
Alex Bennée05248382017-03-29 16:46:59 +01001385static void prepare_icount_for_run(CPUState *cpu)
1386{
1387 if (use_icount) {
Alex Bennéeeda5f7c2017-04-05 12:35:48 +01001388 int insns_left;
Alex Bennée05248382017-03-29 16:46:59 +01001389
1390 /* These should always be cleared by process_icount_data after
1391 * each vCPU execution. However, u16.high can be raised
1392 * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt.
1393 */
Richard Henderson5e140192019-03-28 11:54:23 -10001394 g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
Alex Bennée05248382017-03-29 16:46:59 +01001395 g_assert(cpu->icount_extra == 0);
1396
Alex Bennéeeda5f7c2017-04-05 12:35:48 +01001397 cpu->icount_budget = tcg_get_icount_limit();
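        /* The TCG decrementer is only 16 bits wide, so stage at most
         * 0xffff instructions in u16.low and bank the remainder of the
         * budget in icount_extra.
         */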
1398 insns_left = MIN(0xffff, cpu->icount_budget);
Richard Henderson5e140192019-03-28 11:54:23 -10001399 cpu_neg(cpu)->icount_decr.u16.low = insns_left;
Alex Bennéeeda5f7c2017-04-05 12:35:48 +01001400 cpu->icount_extra = cpu->icount_budget - insns_left;
Alex Bennéed759c952018-02-27 12:52:48 +03001401
1402 replay_mutex_lock();
Alex Bennée05248382017-03-29 16:46:59 +01001403 }
1404}
1405
1406static void process_icount_data(CPUState *cpu)
1407{
1408 if (use_icount) {
Alex Bennéee4cd9652017-03-31 16:09:42 +01001409 /* Account for executed instructions */
Alex Bennée512d3c82017-04-05 12:32:37 +01001410 cpu_update_icount(cpu);
Alex Bennée05248382017-03-29 16:46:59 +01001411
1412 /* Reset the counters */
Richard Henderson5e140192019-03-28 11:54:23 -10001413 cpu_neg(cpu)->icount_decr.u16.low = 0;
Alex Bennée05248382017-03-29 16:46:59 +01001414 cpu->icount_extra = 0;
Alex Bennéee4cd9652017-03-31 16:09:42 +01001415 cpu->icount_budget = 0;
1416
Alex Bennée05248382017-03-29 16:46:59 +01001417 replay_account_executed_instructions();
Alex Bennéed759c952018-02-27 12:52:48 +03001418
1419 replay_mutex_unlock();
Alex Bennée05248382017-03-29 16:46:59 +01001420 }
1421}
1422
1423
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001424static int tcg_cpu_exec(CPUState *cpu)
1425{
1426 int ret;
1427#ifdef CONFIG_PROFILER
1428 int64_t ti;
1429#endif
1430
Emilio G. Cotaf28d0df2018-06-22 13:45:31 -04001431 assert(tcg_enabled());
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001432#ifdef CONFIG_PROFILER
1433 ti = profile_getclock();
1434#endif
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001435 cpu_exec_start(cpu);
1436 ret = cpu_exec(cpu);
1437 cpu_exec_end(cpu);
1438#ifdef CONFIG_PROFILER
Emilio G. Cota72fd2ef2018-10-10 10:48:53 -04001439 atomic_set(&tcg_ctx->prof.cpu_exec_time,
1440 tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001441#endif
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001442 return ret;
1443}
1444
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001445/* Destroy any remaining vCPUs which have been unplugged and have
1446 * finished running.
1447 */
1448static void deal_with_unplugged_cpus(void)
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001449{
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001450 CPUState *cpu;
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001451
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001452 CPU_FOREACH(cpu) {
1453 if (cpu->unplug && !cpu_can_run(cpu)) {
1454 qemu_tcg_destroy_vcpu(cpu);
1455 cpu->created = false;
1456 qemu_cond_signal(&qemu_cpu_cond);
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001457 break;
1458 }
1459 }
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001460}
Jan Kiszkabdb7ca62011-09-26 09:40:39 +02001461
Alex Bennée65467062017-02-23 18:29:09 +00001462/* Single-threaded TCG
1463 *
1464 * In the single-threaded case each vCPU is simulated in turn. If
1465 * there is more than a single vCPU we create a simple timer to kick
1466 * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
1467 * This is done explicitly rather than relying on side-effects
1468 * elsewhere.
1469 */
1470
Alex Bennée37257942017-02-23 18:29:14 +00001471static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
Blue Swirl296af7c2010-03-29 19:23:50 +00001472{
Andreas Färberc3586ba2012-05-03 01:41:24 +02001473 CPUState *cpu = arg;
Blue Swirl296af7c2010-03-29 19:23:50 +00001474
Emilio G. Cotaf28d0df2018-06-22 13:45:31 -04001475 assert(tcg_enabled());
Paolo Bonziniab28bd22015-07-09 08:55:38 +02001476 rcu_register_thread();
Emilio G. Cota3468b592017-07-19 18:57:58 -04001477 tcg_register_thread();
Paolo Bonziniab28bd22015-07-09 08:55:38 +02001478
Paolo Bonzini2e7f7a32015-06-18 18:47:18 +02001479 qemu_mutex_lock_iothread();
Andreas Färber814e6122012-05-02 17:00:37 +02001480 qemu_thread_get_self(cpu->thread);
Blue Swirl296af7c2010-03-29 19:23:50 +00001481
David Hildenbrand5a9c9732018-02-09 20:52:39 +01001482 cpu->thread_id = qemu_get_thread_id();
1483 cpu->created = true;
1484 cpu->can_do_io = 1;
Blue Swirl296af7c2010-03-29 19:23:50 +00001485 qemu_cond_signal(&qemu_cpu_cond);
Richard Henderson9c09a252019-03-14 13:06:29 -07001486 qemu_guest_random_seed_thread_part2(cpu->random_seed);
Blue Swirl296af7c2010-03-29 19:23:50 +00001487
Jan Kiszkafa7d1862011-08-22 18:35:25 +02001488 /* wait for initial kick-off after machine start */
Emilio G. Cotac28e3992015-04-27 12:45:28 -04001489 while (first_cpu->stopped) {
KONRAD Fredericd5f8d612015-08-10 17:27:06 +02001490 qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
Jan Kiszka8e564b42012-02-17 18:31:15 +01001491
1492 /* process any pending work */
Andreas Färberbdc44642013-06-24 23:50:24 +02001493 CPU_FOREACH(cpu) {
Alex Bennée37257942017-02-23 18:29:14 +00001494 current_cpu = cpu;
Andreas Färber182735e2013-05-29 22:29:20 +02001495 qemu_wait_io_event_common(cpu);
Jan Kiszka8e564b42012-02-17 18:31:15 +01001496 }
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001497 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001498
Alex Bennée65467062017-02-23 18:29:09 +00001499 start_tcg_kick_timer();
1500
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001501 cpu = first_cpu;
1502
Alex Bennéee5143e32017-02-23 18:29:12 +00001503 /* process any pending work */
1504 cpu->exit_request = 1;
1505
Blue Swirl296af7c2010-03-29 19:23:50 +00001506 while (1) {
Alex Bennéed759c952018-02-27 12:52:48 +03001507 qemu_mutex_unlock_iothread();
1508 replay_mutex_lock();
1509 qemu_mutex_lock_iothread();
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001510 /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
1511 qemu_account_warp_timer();
1512
Paolo Bonzini6b8f0182017-03-02 19:56:40 +01001513 /* Run the timers here. This is much more efficient than
1514 * waking up the I/O thread and waiting for completion.
1515 */
1516 handle_icount_deadline();
1517
Alex Bennéed759c952018-02-27 12:52:48 +03001518 replay_mutex_unlock();
1519
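        /* The previous pass may have run off the end of the CPU list
         * (CPU_NEXT returns NULL after the last vCPU), so wrap the
         * round-robin scan back to the first CPU.
         */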
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001520 if (!cpu) {
1521 cpu = first_cpu;
1522 }
1523
Alex Bennéee5143e32017-02-23 18:29:12 +00001524 while (cpu && !cpu->queued_work_first && !cpu->exit_request) {
1525
Alex Bennée791158d2017-02-23 18:29:10 +00001526 atomic_mb_set(&tcg_current_rr_cpu, cpu);
Alex Bennée37257942017-02-23 18:29:14 +00001527 current_cpu = cpu;
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001528
1529 qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
1530 (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
1531
1532 if (cpu_can_run(cpu)) {
1533 int r;
Alex Bennée05248382017-03-29 16:46:59 +01001534
Alex Bennéed759c952018-02-27 12:52:48 +03001535 qemu_mutex_unlock_iothread();
Alex Bennée05248382017-03-29 16:46:59 +01001536 prepare_icount_for_run(cpu);
1537
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001538 r = tcg_cpu_exec(cpu);
Alex Bennée05248382017-03-29 16:46:59 +01001539
1540 process_icount_data(cpu);
Alex Bennéed759c952018-02-27 12:52:48 +03001541 qemu_mutex_lock_iothread();
Alex Bennée05248382017-03-29 16:46:59 +01001542
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001543 if (r == EXCP_DEBUG) {
1544 cpu_handle_guest_debug(cpu);
1545 break;
Pranith Kumar08e73c42017-02-23 18:29:15 +00001546 } else if (r == EXCP_ATOMIC) {
1547 qemu_mutex_unlock_iothread();
1548 cpu_exec_step_atomic(cpu);
1549 qemu_mutex_lock_iothread();
1550 break;
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001551 }
Alex Bennée37257942017-02-23 18:29:14 +00001552 } else if (cpu->stop) {
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001553 if (cpu->unplug) {
1554 cpu = CPU_NEXT(cpu);
1555 }
1556 break;
1557 }
1558
Alex Bennéee5143e32017-02-23 18:29:12 +00001559 cpu = CPU_NEXT(cpu);
1560 } /* while (cpu && !cpu->exit_request) */
1561
Alex Bennée791158d2017-02-23 18:29:10 +00001562 /* Does not need atomic_mb_set because a spurious wakeup is okay. */
1563 atomic_set(&tcg_current_rr_cpu, NULL);
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001564
Alex Bennéee5143e32017-02-23 18:29:12 +00001565 if (cpu && cpu->exit_request) {
1566 atomic_mb_set(&cpu->exit_request, 0);
1567 }
Alex Blighac70aaf2013-08-21 16:02:57 +01001568
Clement Deschamps013aabd2018-10-21 16:21:03 +02001569 if (use_icount && all_cpu_threads_idle()) {
1570 /*
1571 * When all cpus are sleeping (e.g. in WFI), to avoid a deadlock
1572 * in the main_loop, wake it up in order to start the warp timer.
1573 */
1574 qemu_notify_event();
1575 }
1576
Paolo Bonzinia8efa602018-11-14 12:36:57 +01001577 qemu_tcg_rr_wait_io_event();
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001578 deal_with_unplugged_cpus();
Blue Swirl296af7c2010-03-29 19:23:50 +00001579 }
1580
Paolo Bonzini9b0605f2018-01-30 11:05:06 -05001581 rcu_unregister_thread();
Blue Swirl296af7c2010-03-29 19:23:50 +00001582 return NULL;
1583}
1584
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001585static void *qemu_hax_cpu_thread_fn(void *arg)
1586{
1587 CPUState *cpu = arg;
1588 int r;
Vincent Palatinb3d3a422017-03-20 11:15:49 +01001589
Paolo Bonzini9857c2d2018-01-30 16:28:49 +01001590 rcu_register_thread();
Vincent Palatinb3d3a422017-03-20 11:15:49 +01001591 qemu_mutex_lock_iothread();
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001592 qemu_thread_get_self(cpu->thread);
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001593
1594 cpu->thread_id = qemu_get_thread_id();
1595 cpu->created = true;
1596 cpu->halted = 0;
1597 current_cpu = cpu;
1598
1599 hax_init_vcpu(cpu);
1600 qemu_cond_signal(&qemu_cpu_cond);
Richard Henderson9c09a252019-03-14 13:06:29 -07001601 qemu_guest_random_seed_thread_part2(cpu->random_seed);
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001602
Paolo Bonzini9857c2d2018-01-30 16:28:49 +01001603 do {
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001604 if (cpu_can_run(cpu)) {
1605 r = hax_smp_cpu_exec(cpu);
1606 if (r == EXCP_DEBUG) {
1607 cpu_handle_guest_debug(cpu);
1608 }
1609 }
1610
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001611 qemu_wait_io_event(cpu);
Paolo Bonzini9857c2d2018-01-30 16:28:49 +01001612 } while (!cpu->unplug || cpu_can_run(cpu));
1613 rcu_unregister_thread();
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001614 return NULL;
1615}
1616
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05001617/* The HVF-specific vCPU thread function. This one should only run
1618 * when the host CPU supports the VMX "unrestricted guest" feature. */
1619static void *qemu_hvf_cpu_thread_fn(void *arg)
1620{
1621 CPUState *cpu = arg;
1622
1623 int r;
1624
1625 assert(hvf_enabled());
1626
1627 rcu_register_thread();
1628
1629 qemu_mutex_lock_iothread();
1630 qemu_thread_get_self(cpu->thread);
1631
1632 cpu->thread_id = qemu_get_thread_id();
1633 cpu->can_do_io = 1;
1634 current_cpu = cpu;
1635
1636 hvf_init_vcpu(cpu);
1637
1638 /* signal CPU creation */
1639 cpu->created = true;
1640 qemu_cond_signal(&qemu_cpu_cond);
Richard Henderson9c09a252019-03-14 13:06:29 -07001641 qemu_guest_random_seed_thread_part2(cpu->random_seed);
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05001642
1643 do {
1644 if (cpu_can_run(cpu)) {
1645 r = hvf_vcpu_exec(cpu);
1646 if (r == EXCP_DEBUG) {
1647 cpu_handle_guest_debug(cpu);
1648 }
1649 }
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001650 qemu_wait_io_event(cpu);
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05001651 } while (!cpu->unplug || cpu_can_run(cpu));
1652
1653 hvf_vcpu_destroy(cpu);
1654 cpu->created = false;
1655 qemu_cond_signal(&qemu_cpu_cond);
1656 qemu_mutex_unlock_iothread();
Paolo Bonzini8178e632018-01-30 11:05:21 -05001657 rcu_unregister_thread();
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05001658 return NULL;
1659}
1660
Justin Terry (VM)19306802018-01-22 13:07:49 -08001661static void *qemu_whpx_cpu_thread_fn(void *arg)
1662{
1663 CPUState *cpu = arg;
1664 int r;
1665
1666 rcu_register_thread();
1667
1668 qemu_mutex_lock_iothread();
1669 qemu_thread_get_self(cpu->thread);
1670 cpu->thread_id = qemu_get_thread_id();
1671 current_cpu = cpu;
1672
1673 r = whpx_init_vcpu(cpu);
1674 if (r < 0) {
1675 fprintf(stderr, "whpx_init_vcpu failed: %s\n", strerror(-r));
1676 exit(1);
1677 }
1678
1679 /* signal CPU creation */
1680 cpu->created = true;
1681 qemu_cond_signal(&qemu_cpu_cond);
Richard Henderson9c09a252019-03-14 13:06:29 -07001682 qemu_guest_random_seed_thread_part2(cpu->random_seed);
Justin Terry (VM)19306802018-01-22 13:07:49 -08001683
1684 do {
1685 if (cpu_can_run(cpu)) {
1686 r = whpx_vcpu_exec(cpu);
1687 if (r == EXCP_DEBUG) {
1688 cpu_handle_guest_debug(cpu);
1689 }
1690 }
1691 while (cpu_thread_is_idle(cpu)) {
1692 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
1693 }
1694 qemu_wait_io_event_common(cpu);
1695 } while (!cpu->unplug || cpu_can_run(cpu));
1696
1697 whpx_destroy_vcpu(cpu);
1698 cpu->created = false;
1699 qemu_cond_signal(&qemu_cpu_cond);
1700 qemu_mutex_unlock_iothread();
1701 rcu_unregister_thread();
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001702 return NULL;
1703}
1704
1705#ifdef _WIN32
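/* A deliberately empty APC routine: queuing it is enough to make an
 * alertable wait (the SleepEx call in qemu_wait_io_event) return.
 */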
1706static void CALLBACK dummy_apc_func(ULONG_PTR unused)
1707{
1708}
1709#endif
1710
Alex Bennée37257942017-02-23 18:29:14 +00001711/* Multi-threaded TCG
1712 *
1713 * In the multi-threaded case each vCPU has its own thread. The TLS
1714 * variable current_cpu can be used deep in the code to find the
1715 * current CPUState for a given thread.
1716 */
1717
1718static void *qemu_tcg_cpu_thread_fn(void *arg)
1719{
1720 CPUState *cpu = arg;
1721
Emilio G. Cotaf28d0df2018-06-22 13:45:31 -04001722 assert(tcg_enabled());
Alex Bennéebf51c722017-03-30 18:32:29 +01001723 g_assert(!use_icount);
1724
Alex Bennée37257942017-02-23 18:29:14 +00001725 rcu_register_thread();
Emilio G. Cota3468b592017-07-19 18:57:58 -04001726 tcg_register_thread();
Alex Bennée37257942017-02-23 18:29:14 +00001727
1728 qemu_mutex_lock_iothread();
1729 qemu_thread_get_self(cpu->thread);
1730
1731 cpu->thread_id = qemu_get_thread_id();
1732 cpu->created = true;
1733 cpu->can_do_io = 1;
1734 current_cpu = cpu;
1735 qemu_cond_signal(&qemu_cpu_cond);
Richard Henderson9c09a252019-03-14 13:06:29 -07001736 qemu_guest_random_seed_thread_part2(cpu->random_seed);
Alex Bennée37257942017-02-23 18:29:14 +00001737
1738 /* process any pending work */
1739 cpu->exit_request = 1;
1740
Cédric Le Goater54961aa2018-04-25 15:18:28 +02001741 do {
Alex Bennée37257942017-02-23 18:29:14 +00001742 if (cpu_can_run(cpu)) {
1743 int r;
Alex Bennéed759c952018-02-27 12:52:48 +03001744 qemu_mutex_unlock_iothread();
Alex Bennée37257942017-02-23 18:29:14 +00001745 r = tcg_cpu_exec(cpu);
Alex Bennéed759c952018-02-27 12:52:48 +03001746 qemu_mutex_lock_iothread();
Alex Bennée37257942017-02-23 18:29:14 +00001747 switch (r) {
1748 case EXCP_DEBUG:
1749 cpu_handle_guest_debug(cpu);
1750 break;
1751 case EXCP_HALTED:
1752 /* During start-up the vCPU is reset and the thread is
1753 * kicked several times. If we don't ensure we go back
1754 * to sleep in the halted state we won't cleanly
1755 * start up when the vCPU is enabled.
1756 *
1757 * cpu->halted should ensure we sleep in wait_io_event.
1758 */
1759 g_assert(cpu->halted);
1760 break;
Pranith Kumar08e73c42017-02-23 18:29:15 +00001761 case EXCP_ATOMIC:
1762 qemu_mutex_unlock_iothread();
1763 cpu_exec_step_atomic(cpu);
1764 qemu_mutex_lock_iothread();
            /* fall through */
Alex Bennée37257942017-02-23 18:29:14 +00001765 default:
1766 /* Ignore everything else? */
1767 break;
1768 }
1769 }
1770
Alex Bennée37257942017-02-23 18:29:14 +00001771 atomic_mb_set(&cpu->exit_request, 0);
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001772 qemu_wait_io_event(cpu);
Paolo Bonzini9b0605f2018-01-30 11:05:06 -05001773 } while (!cpu->unplug || cpu_can_run(cpu));
Alex Bennée37257942017-02-23 18:29:14 +00001774
Paolo Bonzini9b0605f2018-01-30 11:05:06 -05001775 qemu_tcg_destroy_vcpu(cpu);
1776 cpu->created = false;
1777 qemu_cond_signal(&qemu_cpu_cond);
1778 qemu_mutex_unlock_iothread();
1779 rcu_unregister_thread();
Alex Bennée37257942017-02-23 18:29:14 +00001780 return NULL;
1781}
1782
Andreas Färber2ff09a42012-05-03 00:23:30 +02001783static void qemu_cpu_kick_thread(CPUState *cpu)
Paolo Bonzinicc015e92011-03-12 17:44:08 +01001784{
1785#ifndef _WIN32
1786 int err;
1787
Paolo Bonzinie0c38212015-08-26 00:19:19 +02001788 if (cpu->thread_kicked) {
1789 return;
Paolo Bonzini9102ded2015-08-18 06:52:09 -07001790 }
Paolo Bonzinie0c38212015-08-26 00:19:19 +02001791 cpu->thread_kicked = true;
Andreas Färber814e6122012-05-02 17:00:37 +02001792 err = pthread_kill(cpu->thread->thread, SIG_IPI);
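    /* ESRCH just means the vCPU thread has already exited (e.g. after
     * unplug); anything else is fatal.
     */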
Laurent Vivierd455ebc2019-01-02 15:16:03 +01001793 if (err && err != ESRCH) {
Paolo Bonzinicc015e92011-03-12 17:44:08 +01001794 fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
1795 exit(1);
1796 }
1797#else /* _WIN32 */
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001798 if (!qemu_cpu_is_self(cpu)) {
Justin Terry (VM)19306802018-01-22 13:07:49 -08001799 if (whpx_enabled()) {
1800 whpx_vcpu_kick(cpu);
1801 } else if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) {
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001802 fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n",
1803 __func__, GetLastError());
1804 exit(1);
1805 }
1806 }
Paolo Bonzinicc015e92011-03-12 17:44:08 +01001807#endif
1808}
1809
Andreas Färberc08d7422012-05-03 04:34:15 +02001810void qemu_cpu_kick(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001811{
Andreas Färberf5c121b2012-05-03 01:22:49 +02001812 qemu_cond_broadcast(cpu->halt_cond);
Paolo Bonzinie0c38212015-08-26 00:19:19 +02001813 if (tcg_enabled()) {
Alex Bennée791158d2017-02-23 18:29:10 +00001814 cpu_exit(cpu);
Alex Bennée37257942017-02-23 18:29:14 +00001815 /* NOP unless doing single-thread RR */
Alex Bennée791158d2017-02-23 18:29:10 +00001816 qemu_cpu_kick_rr_cpu();
Paolo Bonzinie0c38212015-08-26 00:19:19 +02001817 } else {
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001818 if (hax_enabled()) {
1819 /*
1820 * FIXME: race condition with the exit_request check in
1821 * hax_vcpu_hax_exec
1822 */
1823 cpu->exit_request = 1;
1824 }
Paolo Bonzinie0c38212015-08-26 00:19:19 +02001825 qemu_cpu_kick_thread(cpu);
1826 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001827}
1828
Jan Kiszka46d62fa2011-02-01 22:15:59 +01001829void qemu_cpu_kick_self(void)
1830{
Andreas Färber4917cf42013-05-27 05:17:50 +02001831 assert(current_cpu);
Paolo Bonzini9102ded2015-08-18 06:52:09 -07001832 qemu_cpu_kick_thread(current_cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001833}
1834
Andreas Färber60e82572012-05-02 22:23:49 +02001835bool qemu_cpu_is_self(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001836{
Andreas Färber814e6122012-05-02 17:00:37 +02001837 return qemu_thread_is_self(cpu->thread);
Blue Swirl296af7c2010-03-29 19:23:50 +00001838}
1839
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01001840bool qemu_in_vcpu_thread(void)
Juan Quintelaaa723c22012-09-18 16:30:11 +02001841{
Andreas Färber4917cf42013-05-27 05:17:50 +02001842 return current_cpu && qemu_cpu_is_self(current_cpu);
Juan Quintelaaa723c22012-09-18 16:30:11 +02001843}
1844
Paolo Bonziniafbe7052015-06-18 18:47:19 +02001845static __thread bool iothread_locked = false;
1846
1847bool qemu_mutex_iothread_locked(void)
1848{
1849 return iothread_locked;
1850}
1851
Emilio G. Cotacb764d02017-10-28 02:16:41 -04001852/*
1853 * The BQL is taken from so many places that it is worth profiling the
1854 * callers directly, instead of funneling them all through a single function.
1855 */
1856void qemu_mutex_lock_iothread_impl(const char *file, int line)
Blue Swirl296af7c2010-03-29 19:23:50 +00001857{
Emilio G. Cotacb764d02017-10-28 02:16:41 -04001858 QemuMutexLockFunc bql_lock = atomic_read(&qemu_bql_mutex_lock_func);
1859
Jan Kiszka8d04fb52017-02-23 18:29:11 +00001860 g_assert(!qemu_mutex_iothread_locked());
Emilio G. Cotacb764d02017-10-28 02:16:41 -04001861 bql_lock(&qemu_global_mutex, file, line);
Paolo Bonziniafbe7052015-06-18 18:47:19 +02001862 iothread_locked = true;
Blue Swirl296af7c2010-03-29 19:23:50 +00001863}
1864
1865void qemu_mutex_unlock_iothread(void)
1866{
Jan Kiszka8d04fb52017-02-23 18:29:11 +00001867 g_assert(qemu_mutex_iothread_locked());
Paolo Bonziniafbe7052015-06-18 18:47:19 +02001868 iothread_locked = false;
Blue Swirl296af7c2010-03-29 19:23:50 +00001869 qemu_mutex_unlock(&qemu_global_mutex);
1870}
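/*
 * A minimal usage sketch (hypothetical caller): take the BQL around any
 * access to global machine state, and do not sleep while holding it:
 *
 *     qemu_mutex_lock_iothread();
 *     ... touch device or CPU state ...
 *     qemu_mutex_unlock_iothread();
 */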
1871
Alex Bennéee8faee02016-10-27 16:09:58 +01001872static bool all_vcpus_paused(void)
Blue Swirl296af7c2010-03-29 19:23:50 +00001873{
Andreas Färberbdc44642013-06-24 23:50:24 +02001874 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001875
Andreas Färberbdc44642013-06-24 23:50:24 +02001876 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001877 if (!cpu->stopped) {
Alex Bennéee8faee02016-10-27 16:09:58 +01001878 return false;
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001879 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001880 }
1881
Alex Bennéee8faee02016-10-27 16:09:58 +01001882 return true;
Blue Swirl296af7c2010-03-29 19:23:50 +00001883}
1884
1885void pause_all_vcpus(void)
1886{
Andreas Färberbdc44642013-06-24 23:50:24 +02001887 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001888
Alex Bligh40daca52013-08-21 16:03:02 +01001889 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
Andreas Färberbdc44642013-06-24 23:50:24 +02001890 CPU_FOREACH(cpu) {
David Hildenbrandebd05fe2017-11-29 20:12:15 +01001891 if (qemu_cpu_is_self(cpu)) {
1892 qemu_cpu_stop(cpu, true);
1893 } else {
1894 cpu->stop = true;
1895 qemu_cpu_kick(cpu);
1896 }
Jan Kiszkad798e972012-02-17 18:31:16 +01001897 }
1898
Alex Bennéed759c952018-02-27 12:52:48 +03001899 /* We need to drop the replay_lock so any vCPU threads woken up
1900 * can finish their replay tasks
1901 */
1902 replay_mutex_unlock();
1903
Blue Swirl296af7c2010-03-29 19:23:50 +00001904 while (!all_vcpus_paused()) {
Paolo Bonzinibe7d6c52011-03-12 17:44:02 +01001905 qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
Andreas Färberbdc44642013-06-24 23:50:24 +02001906 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001907 qemu_cpu_kick(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001908 }
1909 }
Alex Bennéed759c952018-02-27 12:52:48 +03001910
1911 qemu_mutex_unlock_iothread();
1912 replay_mutex_lock();
1913 qemu_mutex_lock_iothread();
Blue Swirl296af7c2010-03-29 19:23:50 +00001914}
1915
Igor Mammedov29936832013-04-23 10:29:37 +02001916void cpu_resume(CPUState *cpu)
1917{
1918 cpu->stop = false;
1919 cpu->stopped = false;
1920 qemu_cpu_kick(cpu);
1921}
1922
Blue Swirl296af7c2010-03-29 19:23:50 +00001923void resume_all_vcpus(void)
1924{
Andreas Färberbdc44642013-06-24 23:50:24 +02001925 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001926
Alex Bligh40daca52013-08-21 16:03:02 +01001927 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
Andreas Färberbdc44642013-06-24 23:50:24 +02001928 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001929 cpu_resume(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001930 }
1931}
1932
Paolo Bonzinidbadee42018-01-30 16:40:12 +01001933void cpu_remove_sync(CPUState *cpu)
Gu Zheng4c055ab2016-05-12 09:18:13 +05301934{
1935 cpu->stop = true;
1936 cpu->unplug = true;
1937 qemu_cpu_kick(cpu);
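    /* Drop the BQL across the join: the exiting vCPU thread must take it
     * to finish its teardown, so joining while holding it would deadlock.
     */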
Paolo Bonzinidbadee42018-01-30 16:40:12 +01001938 qemu_mutex_unlock_iothread();
1939 qemu_thread_join(cpu->thread);
1940 qemu_mutex_lock_iothread();
Bharata B Rao2c579042016-05-12 09:18:14 +05301941}
1942
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001943/* For temporary buffers for forming a name */
1944#define VCPU_THREAD_NAME_SIZE 16
1945
Andreas Färbere5ab30a2012-05-03 01:50:44 +02001946static void qemu_tcg_init_vcpu(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001947{
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001948 char thread_name[VCPU_THREAD_NAME_SIZE];
Alex Bennée37257942017-02-23 18:29:14 +00001949 static QemuCond *single_tcg_halt_cond;
1950 static QemuThread *single_tcg_cpu_thread;
Emilio G. Cotae8feb962017-07-07 19:24:20 -04001951 static int tcg_region_inited;
1952
Emilio G. Cotaf28d0df2018-06-22 13:45:31 -04001953 assert(tcg_enabled());
Emilio G. Cotae8feb962017-07-07 19:24:20 -04001954 /*
1955 * Initialize TCG regions--once. Now is a good time, because:
1956 * (1) TCG's init context, prologue and target globals have been set up.
1957 * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the
1958 * -accel flag is processed, so the check doesn't work then).
1959 */
1960 if (!tcg_region_inited) {
1961 tcg_region_inited = 1;
1962 tcg_region_init();
1963 }
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001964
Alex Bennée37257942017-02-23 18:29:14 +00001965 if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
Andreas Färber814e6122012-05-02 17:00:37 +02001966 cpu->thread = g_malloc0(sizeof(QemuThread));
Andreas Färberf5c121b2012-05-03 01:22:49 +02001967 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1968 qemu_cond_init(cpu->halt_cond);
Alex Bennée37257942017-02-23 18:29:14 +00001969
1970 if (qemu_tcg_mttcg_enabled()) {
1971 /* create a thread per vCPU with TCG (MTTCG) */
1972 parallel_cpus = true;
1973 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001974 cpu->cpu_index);
Alex Bennée37257942017-02-23 18:29:14 +00001975
1976 qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
1977 cpu, QEMU_THREAD_JOINABLE);
1978
1979 } else {
1980 /* share a single thread for all cpus with TCG */
1981 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
1982 qemu_thread_create(cpu->thread, thread_name,
1983 qemu_tcg_rr_cpu_thread_fn,
1984 cpu, QEMU_THREAD_JOINABLE);
1985
1986 single_tcg_halt_cond = cpu->halt_cond;
1987 single_tcg_cpu_thread = cpu->thread;
1988 }
Paolo Bonzini1ecf47b2011-12-13 13:43:52 +01001989#ifdef _WIN32
Andreas Färber814e6122012-05-02 17:00:37 +02001990 cpu->hThread = qemu_thread_get_handle(cpu->thread);
Paolo Bonzini1ecf47b2011-12-13 13:43:52 +01001991#endif
Blue Swirl296af7c2010-03-29 19:23:50 +00001992 } else {
Alex Bennée37257942017-02-23 18:29:14 +00001993 /* For non-MTTCG cases we share the thread */
1994 cpu->thread = single_tcg_cpu_thread;
1995 cpu->halt_cond = single_tcg_halt_cond;
David Hildenbranda3421732018-02-09 20:52:37 +01001996 cpu->thread_id = first_cpu->thread_id;
1997 cpu->can_do_io = 1;
1998 cpu->created = true;
Blue Swirl296af7c2010-03-29 19:23:50 +00001999 }
2000}
2001
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01002002static void qemu_hax_start_vcpu(CPUState *cpu)
2003{
2004 char thread_name[VCPU_THREAD_NAME_SIZE];
2005
2006 cpu->thread = g_malloc0(sizeof(QemuThread));
2007 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
2008 qemu_cond_init(cpu->halt_cond);
2009
2010 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX",
2011 cpu->cpu_index);
2012 qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn,
2013 cpu, QEMU_THREAD_JOINABLE);
2014#ifdef _WIN32
2015 cpu->hThread = qemu_thread_get_handle(cpu->thread);
2016#endif
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01002017}
2018
Andreas Färber48a106b2013-05-27 02:20:39 +02002019static void qemu_kvm_start_vcpu(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00002020{
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00002021 char thread_name[VCPU_THREAD_NAME_SIZE];
2022
Andreas Färber814e6122012-05-02 17:00:37 +02002023 cpu->thread = g_malloc0(sizeof(QemuThread));
Andreas Färberf5c121b2012-05-03 01:22:49 +02002024 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
2025 qemu_cond_init(cpu->halt_cond);
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00002026 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
2027 cpu->cpu_index);
2028 qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
2029 cpu, QEMU_THREAD_JOINABLE);
Blue Swirl296af7c2010-03-29 19:23:50 +00002030}
2031
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05002032static void qemu_hvf_start_vcpu(CPUState *cpu)
2033{
2034 char thread_name[VCPU_THREAD_NAME_SIZE];
2035
2036 /* HVF currently does not support TCG, and only runs in
2037 * unrestricted-guest mode. */
2038 assert(hvf_enabled());
2039
2040 cpu->thread = g_malloc0(sizeof(QemuThread));
2041 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
2042 qemu_cond_init(cpu->halt_cond);
2043
2044 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
2045 cpu->cpu_index);
2046 qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn,
2047 cpu, QEMU_THREAD_JOINABLE);
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05002048}
2049
Justin Terry (VM)19306802018-01-22 13:07:49 -08002050static void qemu_whpx_start_vcpu(CPUState *cpu)
2051{
2052 char thread_name[VCPU_THREAD_NAME_SIZE];
2053
2054 cpu->thread = g_malloc0(sizeof(QemuThread));
2055 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
2056 qemu_cond_init(cpu->halt_cond);
2057 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX",
2058 cpu->cpu_index);
2059 qemu_thread_create(cpu->thread, thread_name, qemu_whpx_cpu_thread_fn,
2060 cpu, QEMU_THREAD_JOINABLE);
2061#ifdef _WIN32
2062 cpu->hThread = qemu_thread_get_handle(cpu->thread);
2063#endif
Justin Terry (VM)19306802018-01-22 13:07:49 -08002064}
2065
Andreas Färber10a90212013-05-27 02:24:35 +02002066static void qemu_dummy_start_vcpu(CPUState *cpu)
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02002067{
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00002068 char thread_name[VCPU_THREAD_NAME_SIZE];
2069
Andreas Färber814e6122012-05-02 17:00:37 +02002070 cpu->thread = g_malloc0(sizeof(QemuThread));
Andreas Färberf5c121b2012-05-03 01:22:49 +02002071 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
2072 qemu_cond_init(cpu->halt_cond);
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00002073 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
2074 cpu->cpu_index);
2075 qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02002076 QEMU_THREAD_JOINABLE);
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02002077}
2078
Andreas Färberc643bed2013-05-27 03:23:24 +02002079void qemu_init_vcpu(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00002080{
Andreas Färberce3960e2012-12-17 03:27:07 +01002081 cpu->nr_cores = smp_cores;
2082 cpu->nr_threads = smp_threads;
Andreas Färberf324e762012-05-02 23:26:21 +02002083 cpu->stopped = true;
Richard Henderson9c09a252019-03-14 13:06:29 -07002084 cpu->random_seed = qemu_guest_random_seed_thread_part1();
Peter Maydell56943e82016-01-21 14:15:04 +00002085
2086 if (!cpu->as) {
2087 /* If the target cpu hasn't set up any address spaces itself,
2088 * give it the default one.
2089 */
Peter Maydell12ebc9a2016-01-21 14:15:04 +00002090 cpu->num_ases = 1;
Peter Xu80ceb072017-11-23 17:23:32 +08002091 cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
Peter Maydell56943e82016-01-21 14:15:04 +00002092 }
2093
Jan Kiszka0ab07c62011-02-07 12:19:14 +01002094 if (kvm_enabled()) {
Andreas Färber48a106b2013-05-27 02:20:39 +02002095 qemu_kvm_start_vcpu(cpu);
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01002096 } else if (hax_enabled()) {
2097 qemu_hax_start_vcpu(cpu);
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05002098 } else if (hvf_enabled()) {
2099 qemu_hvf_start_vcpu(cpu);
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02002100 } else if (tcg_enabled()) {
Andreas Färbere5ab30a2012-05-03 01:50:44 +02002101 qemu_tcg_init_vcpu(cpu);
Justin Terry (VM)19306802018-01-22 13:07:49 -08002102 } else if (whpx_enabled()) {
2103 qemu_whpx_start_vcpu(cpu);
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02002104 } else {
Andreas Färber10a90212013-05-27 02:24:35 +02002105 qemu_dummy_start_vcpu(cpu);
Jan Kiszka0ab07c62011-02-07 12:19:14 +01002106 }
David Hildenbrand81e96312018-02-09 20:52:38 +01002107
2108 while (!cpu->created) {
2109 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
2110 }
Blue Swirl296af7c2010-03-29 19:23:50 +00002111}
2112
Jan Kiszkab4a3d962011-02-01 22:15:43 +01002113void cpu_stop_current(void)
Blue Swirl296af7c2010-03-29 19:23:50 +00002114{
Andreas Färber4917cf42013-05-27 05:17:50 +02002115 if (current_cpu) {
Peter Maydell0ec7e672019-01-07 15:23:47 +00002116 current_cpu->stop = true;
2117 cpu_exit(current_cpu);
Jan Kiszkab4a3d962011-02-01 22:15:43 +01002118 }
Blue Swirl296af7c2010-03-29 19:23:50 +00002119}
2120
Kevin Wolf56983462013-07-05 13:49:54 +02002121int vm_stop(RunState state)
Blue Swirl296af7c2010-03-29 19:23:50 +00002122{
Juan Quintelaaa723c22012-09-18 16:30:11 +02002123 if (qemu_in_vcpu_thread()) {
Paolo Bonzini74892d22014-06-05 14:53:58 +02002124 qemu_system_vmstop_request_prepare();
Luiz Capitulino1dfb4dd2011-07-29 14:26:33 -03002125 qemu_system_vmstop_request(state);
Blue Swirl296af7c2010-03-29 19:23:50 +00002126 /*
2127 * FIXME: should not return to device code in case
2128 * vm_stop() has been requested.
2129 */
Jan Kiszkab4a3d962011-02-01 22:15:43 +01002130 cpu_stop_current();
Kevin Wolf56983462013-07-05 13:49:54 +02002131 return 0;
Blue Swirl296af7c2010-03-29 19:23:50 +00002132 }
Kevin Wolf56983462013-07-05 13:49:54 +02002133
Stefan Hajnoczi4486e892018-03-07 14:42:05 +00002134 return do_vm_stop(state, true);
Blue Swirl296af7c2010-03-29 19:23:50 +00002135}
2136
Claudio Imbrenda2d76e822017-02-14 18:07:47 +01002137/**
2138 * Prepare for (re)starting the VM.
2139 * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
2140 * running or in case of an error condition), 0 otherwise.
2141 */
2142int vm_prepare_start(void)
2143{
2144 RunState requested;
Claudio Imbrenda2d76e822017-02-14 18:07:47 +01002145
2146 qemu_vmstop_requested(&requested);
2147 if (runstate_is_running() && requested == RUN_STATE__MAX) {
2148 return -1;
2149 }
2150
2151 /* Ensure that a STOP/RESUME pair of events is emitted if a
2152 * vmstop request was pending. The BLOCK_IO_ERROR event, for
2153 * example, is documented as always being followed by the STOP
2154 * event.
2155 */
2156 if (runstate_is_running()) {
Peter Xu3ab72382018-08-15 21:37:37 +08002157 qapi_event_send_stop();
2158 qapi_event_send_resume();
Markus Armbrusterf0561582018-04-23 10:45:18 +02002159 return -1;
Claudio Imbrenda2d76e822017-02-14 18:07:47 +01002160 }
2161
2162 /* We are sending this now, but the CPUs will be resumed shortly afterwards */
Peter Xu3ab72382018-08-15 21:37:37 +08002163 qapi_event_send_resume();
Markus Armbrusterf0561582018-04-23 10:45:18 +02002164
2165 replay_enable_events();
2166 cpu_enable_ticks();
2167 runstate_set(RUN_STATE_RUNNING);
2168 vm_state_notify(1, RUN_STATE_RUNNING);
2169 return 0;
Claudio Imbrenda2d76e822017-02-14 18:07:47 +01002170}
2171
2172void vm_start(void)
2173{
2174 if (!vm_prepare_start()) {
2175 resume_all_vcpus();
2176 }
2177}
2178
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03002179/* Does a state transition even if the VM is already stopped;
2180 the current state is forgotten forever. */
Kevin Wolf56983462013-07-05 13:49:54 +02002181int vm_stop_force_state(RunState state)
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03002182{
2183 if (runstate_is_running()) {
Kevin Wolf56983462013-07-05 13:49:54 +02002184 return vm_stop(state);
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03002185 } else {
2186 runstate_set(state);
Wen Congyangb2780d32015-11-20 17:34:38 +08002187
2188 bdrv_drain_all();
Kevin Wolf594a45c2013-07-18 14:52:19 +02002189 /* Make sure to return an error if the flush in a previous vm_stop()
2190 * failed. */
John Snow22af08e2016-09-22 21:45:51 -04002191 return bdrv_flush_all();
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03002192 }
2193}
2194
Markus Armbruster04424282019-04-17 21:17:57 +02002195void list_cpus(const char *optarg)
Blue Swirl262353c2010-05-04 19:55:35 +00002196{
2197 /* XXX: implement xxx_cpu_list for targets that still lack it */
Peter Maydelle916cbf2012-09-05 17:41:08 -03002198#if defined(cpu_list)
Markus Armbruster04424282019-04-17 21:17:57 +02002199 cpu_list();
Blue Swirl262353c2010-05-04 19:55:35 +00002200#endif
2201}
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002202
2203CpuInfoList *qmp_query_cpus(Error **errp)
2204{
Igor Mammedovafed5a52017-05-10 13:29:55 +02002205 MachineState *ms = MACHINE(qdev_get_machine());
2206 MachineClass *mc = MACHINE_GET_CLASS(ms);
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002207 CpuInfoList *head = NULL, *cur_item = NULL;
Andreas Färber182735e2013-05-29 22:29:20 +02002208 CPUState *cpu;
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002209
Andreas Färberbdc44642013-06-24 23:50:24 +02002210 CPU_FOREACH(cpu) {
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002211 CpuInfoList *info;
Andreas Färber182735e2013-05-29 22:29:20 +02002212#if defined(TARGET_I386)
2213 X86CPU *x86_cpu = X86_CPU(cpu);
2214 CPUX86State *env = &x86_cpu->env;
2215#elif defined(TARGET_PPC)
2216 PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
2217 CPUPPCState *env = &ppc_cpu->env;
2218#elif defined(TARGET_SPARC)
2219 SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
2220 CPUSPARCState *env = &sparc_cpu->env;
Michael Clark25fa1942018-03-03 01:32:59 +13002221#elif defined(TARGET_RISCV)
2222 RISCVCPU *riscv_cpu = RISCV_CPU(cpu);
2223 CPURISCVState *env = &riscv_cpu->env;
Andreas Färber182735e2013-05-29 22:29:20 +02002224#elif defined(TARGET_MIPS)
2225 MIPSCPU *mips_cpu = MIPS_CPU(cpu);
2226 CPUMIPSState *env = &mips_cpu->env;
Bastian Koppelmann48e06fe2014-09-01 12:59:46 +01002227#elif defined(TARGET_TRICORE)
2228 TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
2229 CPUTriCoreState *env = &tricore_cpu->env;
Viktor Mihajlovski9d0306d2018-02-16 17:08:37 +01002230#elif defined(TARGET_S390X)
2231 S390CPU *s390_cpu = S390_CPU(cpu);
2232 CPUS390XState *env = &s390_cpu->env;
Andreas Färber182735e2013-05-29 22:29:20 +02002233#endif
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002234
Andreas Färbercb446ec2013-05-01 14:24:52 +02002235 cpu_synchronize_state(cpu);
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002236
2237 info = g_malloc0(sizeof(*info));
2238 info->value = g_malloc0(sizeof(*info->value));
Andreas Färber55e5c282012-12-17 06:18:02 +01002239 info->value->CPU = cpu->cpu_index;
Andreas Färber182735e2013-05-29 22:29:20 +02002240 info->value->current = (cpu == first_cpu);
Andreas Färber259186a2013-01-17 18:51:17 +01002241 info->value->halted = cpu->halted;
Eduardo Habkost58f88d42015-05-08 16:04:22 -03002242 info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
Andreas Färber9f09e182012-05-03 06:59:07 +02002243 info->value->thread_id = cpu->thread_id;
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002244#if defined(TARGET_I386)
Eric Blake86f4b682015-11-18 01:52:59 -07002245 info->value->arch = CPU_INFO_ARCH_X86;
Eric Blake544a3732016-02-17 23:48:27 -07002246 info->value->u.x86.pc = env->eip + env->segs[R_CS].base;
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002247#elif defined(TARGET_PPC)
Eric Blake86f4b682015-11-18 01:52:59 -07002248 info->value->arch = CPU_INFO_ARCH_PPC;
Eric Blake544a3732016-02-17 23:48:27 -07002249 info->value->u.ppc.nip = env->nip;
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002250#elif defined(TARGET_SPARC)
Eric Blake86f4b682015-11-18 01:52:59 -07002251 info->value->arch = CPU_INFO_ARCH_SPARC;
Eric Blake544a3732016-02-17 23:48:27 -07002252 info->value->u.q_sparc.pc = env->pc;
2253 info->value->u.q_sparc.npc = env->npc;
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002254#elif defined(TARGET_MIPS)
Eric Blake86f4b682015-11-18 01:52:59 -07002255 info->value->arch = CPU_INFO_ARCH_MIPS;
Eric Blake544a3732016-02-17 23:48:27 -07002256 info->value->u.q_mips.PC = env->active_tc.PC;
Bastian Koppelmann48e06fe2014-09-01 12:59:46 +01002257#elif defined(TARGET_TRICORE)
Eric Blake86f4b682015-11-18 01:52:59 -07002258 info->value->arch = CPU_INFO_ARCH_TRICORE;
Eric Blake544a3732016-02-17 23:48:27 -07002259 info->value->u.tricore.PC = env->PC;
Viktor Mihajlovski9d0306d2018-02-16 17:08:37 +01002260#elif defined(TARGET_S390X)
2261 info->value->arch = CPU_INFO_ARCH_S390;
2262 info->value->u.s390.cpu_state = env->cpu_state;
Michael Clark25fa1942018-03-03 01:32:59 +13002263#elif defined(TARGET_RISCV)
2264 info->value->arch = CPU_INFO_ARCH_RISCV;
2265 info->value->u.riscv.pc = env->pc;
Eric Blake86f4b682015-11-18 01:52:59 -07002266#else
2267 info->value->arch = CPU_INFO_ARCH_OTHER;
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002268#endif
Igor Mammedovafed5a52017-05-10 13:29:55 +02002269 info->value->has_props = !!mc->cpu_index_to_instance_props;
2270 if (info->value->has_props) {
2271 CpuInstanceProperties *props;
2272 props = g_malloc0(sizeof(*props));
2273 *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
2274 info->value->props = props;
2275 }
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002276
2277 /* XXX: waiting for the qapi to support GSList */
2278 if (!cur_item) {
2279 head = cur_item = info;
2280 } else {
2281 cur_item->next = info;
2282 cur_item = info;
2283 }
2284 }
2285
2286 return head;
2287}
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002288
Laszlo Ersekdaa9d2b2018-04-27 21:28:51 +02002289static CpuInfoArch sysemu_target_to_cpuinfo_arch(SysEmuTarget target)
2290{
2291 /*
2292 * The @SysEmuTarget -> @CpuInfoArch mapping below is based on the
2293 * TARGET_ARCH -> TARGET_BASE_ARCH mapping in the "configure" script.
2294 */
2295 switch (target) {
2296 case SYS_EMU_TARGET_I386:
2297 case SYS_EMU_TARGET_X86_64:
2298 return CPU_INFO_ARCH_X86;
2299
2300 case SYS_EMU_TARGET_PPC:
Laszlo Ersekdaa9d2b2018-04-27 21:28:51 +02002301 case SYS_EMU_TARGET_PPC64:
2302 return CPU_INFO_ARCH_PPC;
2303
2304 case SYS_EMU_TARGET_SPARC:
2305 case SYS_EMU_TARGET_SPARC64:
2306 return CPU_INFO_ARCH_SPARC;
2307
2308 case SYS_EMU_TARGET_MIPS:
2309 case SYS_EMU_TARGET_MIPSEL:
2310 case SYS_EMU_TARGET_MIPS64:
2311 case SYS_EMU_TARGET_MIPS64EL:
2312 return CPU_INFO_ARCH_MIPS;
2313
2314 case SYS_EMU_TARGET_TRICORE:
2315 return CPU_INFO_ARCH_TRICORE;
2316
2317 case SYS_EMU_TARGET_S390X:
2318 return CPU_INFO_ARCH_S390;
2319
2320 case SYS_EMU_TARGET_RISCV32:
2321 case SYS_EMU_TARGET_RISCV64:
2322 return CPU_INFO_ARCH_RISCV;
2323
2324 default:
2325 return CPU_INFO_ARCH_OTHER;
2326 }
2327}
2328
2329static void cpustate_to_cpuinfo_s390(CpuInfoS390 *info, const CPUState *cpu)
2330{
2331#ifdef TARGET_S390X
2332 S390CPU *s390_cpu = S390_CPU(cpu);
2333 CPUS390XState *env = &s390_cpu->env;
2334
2335 info->cpu_state = env->cpu_state;
2336#else
2337 abort();
2338#endif
2339}
2340
Luiz Capitulinoce74ee32018-02-16 17:08:38 +01002341/*
2342 * fast means: we NEVER interrupt vCPU threads to retrieve
2343 * information from KVM.
2344 */
2345CpuInfoFastList *qmp_query_cpus_fast(Error **errp)
2346{
2347 MachineState *ms = MACHINE(qdev_get_machine());
2348 MachineClass *mc = MACHINE_GET_CLASS(ms);
2349 CpuInfoFastList *head = NULL, *cur_item = NULL;
Laszlo Ersekdaa9d2b2018-04-27 21:28:51 +02002350 SysEmuTarget target = qapi_enum_parse(&SysEmuTarget_lookup, TARGET_NAME,
2351 -1, &error_abort);
Luiz Capitulinoce74ee32018-02-16 17:08:38 +01002352 CPUState *cpu;
2353
2354 CPU_FOREACH(cpu) {
2355 CpuInfoFastList *info = g_malloc0(sizeof(*info));
2356 info->value = g_malloc0(sizeof(*info->value));
2357
2358 info->value->cpu_index = cpu->cpu_index;
2359 info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
2360 info->value->thread_id = cpu->thread_id;
2361
2362 info->value->has_props = !!mc->cpu_index_to_instance_props;
2363 if (info->value->has_props) {
2364 CpuInstanceProperties *props;
2365 props = g_malloc0(sizeof(*props));
2366 *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
2367 info->value->props = props;
2368 }
2369
Laszlo Ersekdaa9d2b2018-04-27 21:28:51 +02002370 info->value->arch = sysemu_target_to_cpuinfo_arch(target);
2371 info->value->target = target;
2372 if (target == SYS_EMU_TARGET_S390X) {
2373 cpustate_to_cpuinfo_s390(&info->value->u.s390x, cpu);
Laszlo Ersekdaa9d2b2018-04-27 21:28:51 +02002374 }
2375
Luiz Capitulinoce74ee32018-02-16 17:08:38 +01002376 if (!cur_item) {
2377 head = cur_item = info;
2378 } else {
2379 cur_item->next = info;
2380 cur_item = info;
2381 }
2382 }
2383
2384 return head;
2385}
2386
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002387void qmp_memsave(int64_t addr, int64_t size, const char *filename,
2388 bool has_cpu, int64_t cpu_index, Error **errp)
2389{
2390 FILE *f;
2391 uint32_t l;
Andreas Färber55e5c282012-12-17 06:18:02 +01002392 CPUState *cpu;
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002393 uint8_t buf[1024];
Borislav Petkov0dc9daf2015-02-08 13:14:38 +01002394 int64_t orig_addr = addr, orig_size = size;
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002395
2396 if (!has_cpu) {
2397 cpu_index = 0;
2398 }
2399
Andreas Färber151d1322013-02-15 15:41:49 +01002400 cpu = qemu_get_cpu(cpu_index);
2401 if (cpu == NULL) {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +01002402 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
2403 "a CPU number");
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002404 return;
2405 }
2406
2407 f = fopen(filename, "wb");
2408 if (!f) {
Luiz Capitulino618da852013-06-07 14:35:06 -04002409 error_setg_file_open(errp, errno, filename);
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002410 return;
2411 }
2412
2413 while (size != 0) {
2414 l = sizeof(buf);
2415 if (l > size) {
2416 l = size;
        }
Aneesh Kumar K.V2f4d0f52013-10-01 21:49:30 +05302417 if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
Borislav Petkov0dc9daf2015-02-08 13:14:38 +01002418 error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
2419 " specified", orig_addr, orig_size);
Aneesh Kumar K.V2f4d0f52013-10-01 21:49:30 +05302420 goto exit;
2421 }
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002422 if (fwrite(buf, 1, l, f) != l) {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +01002423 error_setg(errp, QERR_IO_ERROR);
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002424 goto exit;
2425 }
2426 addr += l;
2427 size -= l;
2428 }
2429
2430exit:
2431 fclose(f);
2432}
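/*
 * A usage sketch (hypothetical values): the QMP command behind this
 * handler dumps guest virtual memory to a host file:
 *
 *     { "execute": "memsave",
 *       "arguments": { "val": 4096, "size": 256,
 *                      "filename": "/tmp/virtual-mem-dump" } }
 */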
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02002433
2434void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
2435 Error **errp)
2436{
2437 FILE *f;
2438 uint32_t l;
2439 uint8_t buf[1024];
2440
2441 f = fopen(filename, "wb");
2442 if (!f) {
Luiz Capitulino618da852013-06-07 14:35:06 -04002443 error_setg_file_open(errp, errno, filename);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02002444 return;
2445 }
2446
2447 while (size != 0) {
2448 l = sizeof(buf);
2449 if (l > size) {
2450 l = size;
        }
Stefan Weileb6282f2014-04-07 20:28:23 +02002451 cpu_physical_memory_read(addr, buf, l);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02002452 if (fwrite(buf, 1, l, f) != l) {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +01002453 error_setg(errp, QERR_IO_ERROR);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02002454 goto exit;
2455 }
2456 addr += l;
2457 size -= l;
2458 }
2459
2460exit:
2461 fclose(f);
2462}
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02002463
2464void qmp_inject_nmi(Error **errp)
2465{
Alexey Kardashevskiy9cb805f2014-08-20 22:16:33 +10002466 nmi_monitor_handle(monitor_get_cpu_index(), errp);
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02002467}
Sebastian Tanase27498be2014-07-25 11:56:33 +02002468
Markus Armbruster76c86612019-04-17 21:17:53 +02002469void dump_drift_info(void)
Sebastian Tanase27498be2014-07-25 11:56:33 +02002470{
2471 if (!use_icount) {
2472 return;
2473 }
2474
Markus Armbruster76c86612019-04-17 21:17:53 +02002475 qemu_printf("Host - Guest clock %"PRIi64" ms\n",
Sebastian Tanase27498be2014-07-25 11:56:33 +02002476 (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
2477 if (icount_align_option) {
Markus Armbruster76c86612019-04-17 21:17:53 +02002478 qemu_printf("Max guest delay %"PRIi64" ms\n",
2479 -max_delay / SCALE_MS);
2480 qemu_printf("Max guest advance %"PRIi64" ms\n",
2481 max_advance / SCALE_MS);
Sebastian Tanase27498be2014-07-25 11:56:33 +02002482 } else {
Markus Armbruster76c86612019-04-17 21:17:53 +02002483 qemu_printf("Max guest delay NA\n");
2484 qemu_printf("Max guest advance NA\n");
Sebastian Tanase27498be2014-07-25 11:56:33 +02002485 }
2486}