/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/config-file.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm.h"
#include "sysemu/hax.h"
#include "sysemu/hvf.h"
#include "sysemu/whpx.h"
#include "exec/exec-all.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/option.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
#include "qemu/guest-random.h"
#include "tcg.h"
#include "hw/nmi.h"
#include "sysemu/replay.h"
#include "hw/boards.h"
#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

int64_t max_delay;
int64_t max_advance;

/* vcpu throttling controls */
static QEMUTimer *throttle_timer;
static unsigned int throttle_percentage;

#define CPU_THROTTLE_PCT_MIN 1
#define CPU_THROTTLE_PCT_MAX 99
#define CPU_THROTTLE_TIMESLICE_NS 10000000

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

static bool icount_sleep = true;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10

typedef struct TimersState {
    /* Protected by BQL. */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* Protect fields that can be respectively read outside the
     * BQL, and written from multiple threads.
     */
    QemuSeqLock vm_clock_seqlock;
    QemuSpin vm_clock_lock;

    int16_t cpu_ticks_enabled;

    /* Conversion factor from emulated instructions to virtual clock ticks. */
    int16_t icount_time_shift;

    /* Compensate for varying guest execution speed. */
    int64_t qemu_icount_bias;

    int64_t vm_clock_warp_start;
    int64_t cpu_clock_offset;

    /* Only written by TCG thread */
    int64_t qemu_icount;

    /* for adjusting icount */
    QEMUTimer *icount_rt_timer;
    QEMUTimer *icount_vm_timer;
    QEMUTimer *icount_warp_timer;
} TimersState;
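/* Readers that run outside the BQL use the seqlock read side
 * (seqlock_read_begin()/seqlock_read_retry(), as in cpu_get_icount()
 * below); writers pair seqlock_write_lock() with vm_clock_lock, which
 * also serializes concurrent writers against each other.
 */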

static TimersState timers_state;
bool mttcg_enabled;

/*
 * We default to false if we know other options have been enabled
 * which are currently incompatible with MTTCG. Otherwise when each
 * guest (target) has been updated to support:
 *  - atomic instructions
 *  - memory ordering primitives (barriers)
 * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
 *
 * Once a guest architecture has been converted to the new primitives
 * there are two remaining limitations to check.
 *
 * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
 * - The host must have a stronger memory order than the guest
 *
 * It may be possible in future to support strong guests on weak hosts
 * but that will require tagging all load/stores in a guest with their
 * implicit memory order requirements which would likely slow things
 * down a lot.
 */

static bool check_tcg_memory_orders_compatible(void)
{
#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
    return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
#else
    return false;
#endif
}

static bool default_mttcg_enabled(void)
{
    if (use_icount || TCG_OVERSIZED_GUEST) {
        return false;
    } else {
#ifdef TARGET_SUPPORTS_MTTCG
        return check_tcg_memory_orders_compatible();
#else
        return false;
#endif
    }
}

void qemu_tcg_configure(QemuOpts *opts, Error **errp)
{
    const char *t = qemu_opt_get(opts, "thread");
    if (t) {
        if (strcmp(t, "multi") == 0) {
            if (TCG_OVERSIZED_GUEST) {
                error_setg(errp, "No MTTCG when guest word size > hosts");
            } else if (use_icount) {
                error_setg(errp, "No MTTCG when icount is enabled");
            } else {
#ifndef TARGET_SUPPORTS_MTTCG
                warn_report("Guest not yet converted to MTTCG - "
                            "you may get unexpected results");
#endif
                if (!check_tcg_memory_orders_compatible()) {
                    warn_report("Guest expects a stronger memory ordering "
                                "than the host provides");
                    error_printf("This may cause strange/hard to debug errors\n");
                }
                mttcg_enabled = true;
            }
        } else if (strcmp(t, "single") == 0) {
            mttcg_enabled = false;
        } else {
            error_setg(errp, "Invalid 'thread' setting %s", t);
        }
    } else {
        mttcg_enabled = default_mttcg_enabled();
    }
}
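/* The "thread" option parsed above is what the command line sets,
 * e.g. "-accel tcg,thread=multi" or "-accel tcg,thread=single".
 */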

/* The current number of executed instructions is based on what we
 * originally budgeted minus the current state of the decrementing
 * icount counters in extra/u16.low.
 */
static int64_t cpu_get_icount_executed(CPUState *cpu)
{
    return (cpu->icount_budget -
            (cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra));
}

/*
 * Update the global shared timer_state.qemu_icount to take into
 * account executed instructions. This is done by the TCG vCPU
 * thread so the main-loop can see time has moved forward.
 */
static void cpu_update_icount_locked(CPUState *cpu)
{
    int64_t executed = cpu_get_icount_executed(cpu);
    cpu->icount_budget -= executed;

    atomic_set_i64(&timers_state.qemu_icount,
                   timers_state.qemu_icount + executed);
}

/*
 * Update the global shared timer_state.qemu_icount to take into
 * account executed instructions. This is done by the TCG vCPU
 * thread so the main-loop can see time has moved forward.
 */
void cpu_update_icount(CPUState *cpu)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    cpu_update_icount_locked(cpu);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

static int64_t cpu_get_icount_raw_locked(void)
{
    CPUState *cpu = current_cpu;

    if (cpu && cpu->running) {
        if (!cpu->can_do_io) {
            error_report("Bad icount read");
            exit(1);
        }
        /* Take into account what has run */
        cpu_update_icount_locked(cpu);
    }
    /* The read is protected by the seqlock, but needs atomic64 to avoid UB */
    return atomic_read_i64(&timers_state.qemu_icount);
}

static int64_t cpu_get_icount_locked(void)
{
    int64_t icount = cpu_get_icount_raw_locked();
    return atomic_read_i64(&timers_state.qemu_icount_bias) +
        cpu_icount_to_ns(icount);
}

int64_t cpu_get_icount_raw(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_raw_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}

/* Return the virtual CPU time, based on the instruction counter. */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}

int64_t cpu_icount_to_ns(int64_t icount)
{
    return icount << atomic_read(&timers_state.icount_time_shift);
}
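/* For example, with icount_time_shift == 3 each instruction accounts for
 * 1 << 3 = 8 ns of virtual time, i.e. 125 MIPS (the initial guess that
 * configure_icount() below starts from in "auto" mode).
 */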

static int64_t cpu_get_ticks_locked(void)
{
    int64_t ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_host_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Non increasing ticks may happen if the host uses software suspend. */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}

/* return the time elapsed in VM between vm_start and vm_stop.  Unless
 * icount is active, cpu_get_ticks() uses units of the host CPU cycle
 * counter.
 */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    qemu_spin_lock(&timers_state.vm_clock_lock);
    ticks = cpu_get_ticks_locked();
    qemu_spin_unlock(&timers_state.vm_clock_lock);
    return ticks;
}

static int64_t cpu_get_clock_locked(void)
{
    int64_t time;

    time = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        time += get_clock();
    }

    return time;
}

/* Return the monotonic time elapsed in VM, i.e.,
 * the time between vm_start and vm_stop
 */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_host_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex. */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing. */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && timers_state.icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down. */
        atomic_set(&timers_state.icount_time_shift,
                   timers_state.icount_time_shift - 1);
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && timers_state.icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up. */
        atomic_set(&timers_state.icount_time_shift,
                   timers_state.icount_time_shift + 1);
    }
    last_delta = delta;
    atomic_set_i64(&timers_state.qemu_icount_bias,
                   cur_icount - (timers_state.qemu_icount
                                 << timers_state.icount_time_shift));
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);
}
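/* Note that icount_adjust() rebases qemu_icount_bias whenever it changes
 * the shift: the new bias satisfies
 * bias + (qemu_icount << icount_time_shift) == cur_icount, so the value
 * returned by cpu_get_icount_locked() stays continuous across the change.
 */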

static void icount_adjust_rt(void *opaque)
{
    timer_mod(timers_state.icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(timers_state.icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    int shift = atomic_read(&timers_state.icount_time_shift);
    return (count + (1 << shift) - 1) >> shift;
}
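/* qemu_icount_round() converts a nanosecond budget into a whole number of
 * instructions, rounding up: with shift == 3, a 20 ns budget becomes
 * (20 + 7) >> 3 = 3 instructions.
 */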

static void icount_warp_rt(void)
{
    unsigned seq;
    int64_t warp_start;

    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    do {
        seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        warp_start = timers_state.vm_clock_warp_start;
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));

    if (warp_start == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock,
                       &timers_state.vm_clock_lock);
    if (runstate_is_running()) {
        int64_t clock = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT,
                                            cpu_get_clock_locked());
        int64_t warp_delta;

        warp_delta = clock - timers_state.vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = clock - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        atomic_set_i64(&timers_state.qemu_icount_bias,
                       timers_state.qemu_icount_bias + warp_delta);
    }
    timers_state.vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                         &timers_state.vm_clock_lock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static void icount_timer_cb(void *opaque)
{
    /* No need for a checkpoint because the timer already synchronizes
     * with CHECKPOINT_CLOCK_VIRTUAL_RT.
     */
    icount_warp_rt();
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    AioContext *aio_context;
    assert(qtest_enabled());
    aio_context = qemu_get_aio_context();
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);

        seqlock_write_lock(&timers_state.vm_clock_seqlock,
                           &timers_state.vm_clock_lock);
        atomic_set_i64(&timers_state.qemu_icount_bias,
                       timers_state.qemu_icount_bias + warp);
        seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                             &timers_state.vm_clock_lock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}

void qemu_start_warp_timer(void)
{
    int64_t clock;
    int64_t deadline;

    if (!use_icount) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    if (replay_mode != REPLAY_MODE_PLAY) {
        if (!all_cpu_threads_idle()) {
            return;
        }

        if (qtest_enabled()) {
            /* When testing, qtest commands advance icount. */
            return;
        }

        replay_checkpoint(CHECKPOINT_CLOCK_WARP_START);
    } else {
        /* warp clock deterministically in record/replay mode */
        if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
            /* vCPU is sleeping and warp can't be started.
               It is probably a race condition: notification sent
               to vCPU was processed in advance and vCPU went to sleep.
               Therefore we have to wake it up to do something. */
            if (replay_has_checkpoint()) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
            return;
        }
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        static bool notified;
        if (!icount_sleep && !notified) {
            warn_report("icount sleep disabled and no active timers");
            notified = true;
        }
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         */
        if (!icount_sleep) {
            /*
             * We never let VCPUs sleep in no sleep icount mode.
             * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
             * to the next QEMU_CLOCK_VIRTUAL event and notify it.
             * It is useful when we want a deterministic execution time,
             * isolated from host latencies.
             */
            seqlock_write_lock(&timers_state.vm_clock_seqlock,
                               &timers_state.vm_clock_lock);
            atomic_set_i64(&timers_state.qemu_icount_bias,
                           timers_state.qemu_icount_bias + deadline);
            seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                                 &timers_state.vm_clock_lock);
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
        } else {
            /*
             * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
             * "real" time (related to the time left until the next event) has
             * passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
             * This keeps the warps from being visible externally; for example,
             * you will not be sending network packets continuously instead of
             * every 100ms.
             */
            seqlock_write_lock(&timers_state.vm_clock_seqlock,
                               &timers_state.vm_clock_lock);
            if (timers_state.vm_clock_warp_start == -1
                || timers_state.vm_clock_warp_start > clock) {
                timers_state.vm_clock_warp_start = clock;
            }
            seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                                 &timers_state.vm_clock_lock);
            timer_mod_anticipate(timers_state.icount_warp_timer,
                                 clock + deadline);
        }
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static void qemu_account_warp_timer(void)
{
    if (!use_icount || !icount_sleep) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    /* warp clock deterministically in record/replay mode */
    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
        return;
    }

    timer_del(timers_state.icount_warp_timer);
    icount_warp_rt();
}

static bool icount_state_needed(void *opaque)
{
    return use_icount;
}

static bool warp_timer_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_warp_timer != NULL;
}

static bool adjust_timers_state_needed(void *opaque)
{
    TimersState *s = opaque;
    return s->icount_rt_timer != NULL;
}

/*
 * Subsection for warp timer migration is optional, because it may not be
 * created.
 */
static const VMStateDescription icount_vmstate_warp_timer = {
    .name = "timer/icount/warp_timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = warp_timer_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(vm_clock_warp_start, TimersState),
        VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription icount_vmstate_adjust_timers = {
    .name = "timer/icount/timers",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = adjust_timers_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
        VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icount_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_warp_timer,
        &icount_vmstate_adjust_timers,
        NULL
    }
};

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_UNUSED(8),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_timers,
        NULL
    }
};

static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
{
    double pct;
    double throttle_ratio;
    long sleeptime_ns;

    if (!cpu_throttle_get_percentage()) {
        return;
    }

    pct = (double)cpu_throttle_get_percentage()/100;
    throttle_ratio = pct / (1 - pct);
    sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS);

    qemu_mutex_unlock_iothread();
    g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */
    qemu_mutex_lock_iothread();
    atomic_set(&cpu->throttle_thread_scheduled, 0);
}
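/* Worked example: at 50% throttle, pct = 0.5 and throttle_ratio = 1, so
 * the vCPU sleeps 10 ms for every 10 ms timeslice it runs; at the 99%
 * maximum, throttle_ratio = 99 and it sleeps 990 ms per timeslice.
 */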

static void cpu_throttle_timer_tick(void *opaque)
{
    CPUState *cpu;
    double pct;

    /* Stop the timer if needed */
    if (!cpu_throttle_get_percentage()) {
        return;
    }
    CPU_FOREACH(cpu) {
        if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
            async_run_on_cpu(cpu, cpu_throttle_thread,
                             RUN_ON_CPU_NULL);
        }
    }

    pct = (double)cpu_throttle_get_percentage()/100;
    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                              CPU_THROTTLE_TIMESLICE_NS / (1-pct));
}
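/* The rearm interval scales with the throttle so that the running part of
 * each period stays close to one timeslice: at pct = 0.5 the timer fires
 * every 20 ms (10 ms asleep plus roughly 10 ms running).
 */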

void cpu_throttle_set(int new_throttle_pct)
{
    /* Ensure throttle percentage is within valid range */
    new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
    new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);

    atomic_set(&throttle_percentage, new_throttle_pct);

    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                              CPU_THROTTLE_TIMESLICE_NS);
}

void cpu_throttle_stop(void)
{
    atomic_set(&throttle_percentage, 0);
}

bool cpu_throttle_active(void)
{
    return (cpu_throttle_get_percentage() != 0);
}

int cpu_throttle_get_percentage(void)
{
    return atomic_read(&throttle_percentage);
}
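/* A typical caller (migration auto-converge, for example) might ramp the
 * throttle up in steps and clear it when done:
 *
 *     cpu_throttle_set(20);
 *     ...
 *     cpu_throttle_set(99);   values are clamped to the 1..99 range
 *     cpu_throttle_stop();
 */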

void cpu_ticks_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock);
    qemu_spin_init(&timers_state.vm_clock_lock);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                  cpu_throttle_timer_tick, NULL);
}

void configure_icount(QemuOpts *opts, Error **errp)
{
    const char *option;
    char *rem_str = NULL;

    option = qemu_opt_get(opts, "shift");
    if (!option) {
        if (qemu_opt_get(opts, "align") != NULL) {
            error_setg(errp, "Please specify shift option when using align");
        }
        return;
    }

    icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
    if (icount_sleep) {
        timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                                      icount_timer_cb, NULL);
    }

    icount_align_option = qemu_opt_get_bool(opts, "align", false);

    if (icount_align_option && !icount_sleep) {
        error_setg(errp, "align=on and sleep=off are incompatible");
    }
    if (strcmp(option, "auto") != 0) {
        errno = 0;
        timers_state.icount_time_shift = strtol(option, &rem_str, 0);
        if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
            error_setg(errp, "icount: Invalid shift value");
        }
        use_icount = 1;
        return;
    } else if (icount_align_option) {
        error_setg(errp, "shift=auto and align=on are incompatible");
    } else if (!icount_sleep) {
        error_setg(errp, "shift=auto and sleep=off are incompatible");
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway. */
    timers_state.icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers. */
    timers_state.vm_clock_warp_start = -1;
    timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                                                icount_adjust_rt, NULL);
    timer_mod(timers_state.icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                                icount_adjust_vm, NULL);
    timer_mod(timers_state.icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND / 10);
}
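/* Examples of the options parsed above:
 *     -icount shift=7          fixed shift, 1 << 7 = 128 ns per instruction
 *     -icount shift=auto       adaptive shift (use_icount == 2), retuned by
 *                              the icount_adjust timers installed just above
 */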

/***********************************************************/
/* TCG vCPU kick timer
 *
 * The kick timer is responsible for moving single threaded vCPU
 * emulation on to the next vCPU. If more than one vCPU is running, a
 * timer event will force a cpu->exit so the next vCPU can get
 * scheduled.
 *
 * The timer is removed if all vCPUs are idle and restarted again once
 * idleness is complete.
 */

static QEMUTimer *tcg_kick_vcpu_timer;
static CPUState *tcg_current_rr_cpu;

#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)

static inline int64_t qemu_tcg_next_kick(void)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
}
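/* TCG_KICK_PERIOD works out to 100 ms, so in round-robin mode a vCPU is
 * forcibly preempted at least ten times per second of virtual time.
 */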

/* Kick the currently round-robin scheduled vCPU */
static void qemu_cpu_kick_rr_cpu(void)
{
    CPUState *cpu;
    do {
        cpu = atomic_mb_read(&tcg_current_rr_cpu);
        if (cpu) {
            cpu_exit(cpu);
        }
    } while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
}
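/* The loop above re-reads tcg_current_rr_cpu after cpu_exit() in case the
 * round-robin scheduler has already moved on to another vCPU, and retries
 * until the value is stable across the kick.
 */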

static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
{
}

void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
{
    if (!use_icount || type != QEMU_CLOCK_VIRTUAL) {
        qemu_notify_event();
        return;
    }

    if (qemu_in_vcpu_thread()) {
        /* A CPU is currently running; kick it back out to the
         * tcg_cpu_exec() loop so it will recalculate its
         * icount deadline immediately.
         */
        qemu_cpu_kick(current_cpu);
    } else if (first_cpu) {
        /* qemu_cpu_kick is not enough to kick a halted CPU out of
         * qemu_tcg_wait_io_event.  async_run_on_cpu, instead,
         * causes cpu_thread_is_idle to return false.  This way,
         * handle_icount_deadline can run.
         * If we have no CPUs at all for some reason, we don't
         * need to do anything.
         */
        async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
    }
}

static void kick_tcg_thread(void *opaque)
{
    timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    qemu_cpu_kick_rr_cpu();
}

static void start_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);
    if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
        tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                           kick_tcg_thread, NULL);
    }
    if (tcg_kick_vcpu_timer && !timer_pending(tcg_kick_vcpu_timer)) {
        timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
    }
}

static void stop_tcg_kick_timer(void)
{
    assert(!mttcg_enabled);
    if (tcg_kick_vcpu_timer && timer_pending(tcg_kick_vcpu_timer)) {
        timer_del(tcg_kick_vcpu_timer);
    }
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
        /* TODO: move to cpu_synchronize_state() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_state(cpu);
        }
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
        /* TODO: move to cpu_synchronize_post_reset() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_post_reset(cpu);
        }
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
        /* TODO: move to cpu_synchronize_post_init() */
        if (hvf_enabled()) {
            hvf_cpu_synchronize_post_init(cpu);
        }
    }
}

void cpu_synchronize_all_pre_loadvm(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_pre_loadvm(cpu);
    }
}

static int do_vm_stop(RunState state, bool send_stop)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        if (send_stop) {
            qapi_event_send_stop();
        }
    }

    bdrv_drain_all();
    replay_disable_events();
    ret = bdrv_flush_all();

    return ret;
}

/* Special vm_stop() variant for terminating the process.  Historically clients
 * did not expect a QMP STOP event and so we need to retain compatibility.
 */
int vm_shutdown(void)
{
    return do_vm_stop(RUN_STATE_SHUTDOWN, false);
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx)
{
    if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) {
        sigbus_reraise();
    }

    if (current_cpu) {
        /* Called asynchronously in VCPU thread. */
        if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    } else {
        /* Called synchronously (via signalfd) in main thread. */
        if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    }
}
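/* BUS_MCEERR_AO and BUS_MCEERR_AR are the Linux machine-check SIGBUS
 * codes: "action optional" (a memory error detected asynchronously) and
 * "action required" (the faulting access itself).  Only those two are
 * forwarded to KVM; any other SIGBUS is re-raised with the default
 * handler and aborts the process.
 */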

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
#else /* !CONFIG_LINUX */
static void qemu_init_sigbus(void)
{
}
#endif /* !CONFIG_LINUX */

static QemuMutex qemu_global_mutex;

static QemuThread io_thread;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
}

static void qemu_kvm_destroy_vcpu(CPUState *cpu)
{
    if (kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}

static void qemu_tcg_destroy_vcpu(CPUState *cpu)
{
}

static void qemu_cpu_stop(CPUState *cpu, bool exit)
{
    g_assert(qemu_cpu_is_self(cpu));
    cpu->stop = false;
    cpu->stopped = true;
    if (exit) {
        cpu_exit(cpu);
    }
    qemu_cond_broadcast(&qemu_pause_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    atomic_mb_set(&cpu->thread_kicked, false);
    if (cpu->stop) {
        qemu_cpu_stop(cpu, false);
    }
    process_queued_cpu_work(cpu);
}

static void qemu_tcg_rr_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        stop_tcg_kick_timer();
        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
    }

    start_tcg_kick_timer();

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

#ifdef _WIN32
    /* Eat dummy APC queued by qemu_cpu_kick_thread. */
    if (!tcg_enabled()) {
        SleepEx(0, TRUE);
    }
#endif
    qemu_wait_io_event_common(cpu);
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        error_report("kvm_init_vcpu failed: %s", strerror(-r));
        exit(1);
    }

    kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    do {
        if (cpu_can_run(cpu)) {
Andreas Färber1458c362013-05-26 23:46:55 +02001285 r = kvm_cpu_exec(cpu);
Jan Kiszka83f338f2011-02-07 12:19:17 +01001286 if (r == EXCP_DEBUG) {
Andreas Färber91325042013-05-27 02:07:49 +02001287 cpu_handle_guest_debug(cpu);
Jan Kiszka83f338f2011-02-07 12:19:17 +01001288 }
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001289 }
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001290 qemu_wait_io_event(cpu);
Gu Zheng4c055ab2016-05-12 09:18:13 +05301291 } while (!cpu->unplug || cpu_can_run(cpu));
Blue Swirl296af7c2010-03-29 19:23:50 +00001292
Gu Zheng4c055ab2016-05-12 09:18:13 +05301293 qemu_kvm_destroy_vcpu(cpu);
Bharata B Rao2c579042016-05-12 09:18:14 +05301294 cpu->created = false;
1295 qemu_cond_signal(&qemu_cpu_cond);
Gu Zheng4c055ab2016-05-12 09:18:13 +05301296 qemu_mutex_unlock_iothread();
Paolo Bonzini57615ed2018-01-30 11:04:36 -05001297 rcu_unregister_thread();
Blue Swirl296af7c2010-03-29 19:23:50 +00001298 return NULL;
1299}
1300
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001301static void *qemu_dummy_cpu_thread_fn(void *arg)
1302{
1303#ifdef _WIN32
Alistair Francis493d89b2018-02-03 09:43:14 +01001304 error_report("qtest is not supported under Windows");
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001305 exit(1);
1306#else
Andreas Färber10a90212013-05-27 02:24:35 +02001307 CPUState *cpu = arg;
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001308 sigset_t waitset;
1309 int r;
1310
Paolo Bonziniab28bd22015-07-09 08:55:38 +02001311 rcu_register_thread();
1312
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001313 qemu_mutex_lock_iothread();
Andreas Färber814e6122012-05-02 17:00:37 +02001314 qemu_thread_get_self(cpu->thread);
Andreas Färber9f09e182012-05-03 06:59:07 +02001315 cpu->thread_id = qemu_get_thread_id();
Pavel Dovgalyuk626cf8f2014-12-08 10:53:17 +03001316 cpu->can_do_io = 1;
Alex Bennée37257942017-02-23 18:29:14 +00001317 current_cpu = cpu;
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001318
1319 sigemptyset(&waitset);
1320 sigaddset(&waitset, SIG_IPI);
1321
1322 /* signal CPU creation */
Andreas Färber61a46212012-05-02 22:49:36 +02001323 cpu->created = true;
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001324 qemu_cond_signal(&qemu_cpu_cond);
Richard Henderson9c09a252019-03-14 13:06:29 -07001325 qemu_guest_random_seed_thread_part2(cpu->random_seed);
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001326
Paolo Bonzinid2831ab2018-01-30 11:04:53 -05001327 do {
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001328 qemu_mutex_unlock_iothread();
1329 do {
1330 int sig;
1331 r = sigwait(&waitset, &sig);
1332 } while (r == -1 && (errno == EAGAIN || errno == EINTR));
1333 if (r == -1) {
1334 perror("sigwait");
1335 exit(1);
1336 }
1337 qemu_mutex_lock_iothread();
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001338 qemu_wait_io_event(cpu);
Paolo Bonzinid2831ab2018-01-30 11:04:53 -05001339 } while (!cpu->unplug);
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001340
David Hildenbrandd40bfcb2019-02-18 10:21:57 +01001341 qemu_mutex_unlock_iothread();
Paolo Bonzinid2831ab2018-01-30 11:04:53 -05001342 rcu_unregister_thread();
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001343 return NULL;
1344#endif
1345}
1346
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001347static int64_t tcg_get_icount_limit(void)
1348{
1349 int64_t deadline;
1350
1351 if (replay_mode != REPLAY_MODE_PLAY) {
1352 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1353
 1354 /* Maintain prior (possibly buggy) behaviour: if no deadline was
 1355 * set (because there is no QEMU_CLOCK_VIRTUAL timer) or the
 1356 * deadline is more than INT32_MAX nanoseconds ahead, clamp it
 1357 * to INT32_MAX nanoseconds.
1358 */
1359 if ((deadline < 0) || (deadline > INT32_MAX)) {
1360 deadline = INT32_MAX;
1361 }
1362
1363 return qemu_icount_round(deadline);
1364 } else {
1365 return replay_get_instructions();
1366 }
1367}
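
/*
 * Worked example (illustrative numbers, not from the original file): if
 * the nearest QEMU_CLOCK_VIRTUAL timer is 5 ms away, the deadline is
 * 5000000 ns, well below INT32_MAX, so the vCPU receives an instruction
 * budget of qemu_icount_round(5000000) and exits its execution loop in
 * time to service that timer.
 */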
1368
Alex Bennée12e97002016-10-27 16:10:14 +01001369static void handle_icount_deadline(void)
1370{
Paolo Bonzini6b8f0182017-03-02 19:56:40 +01001371 assert(qemu_in_vcpu_thread());
Alex Bennée12e97002016-10-27 16:10:14 +01001372 if (use_icount) {
1373 int64_t deadline =
1374 qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1375
1376 if (deadline == 0) {
Paolo Bonzini6b8f0182017-03-02 19:56:40 +01001377 /* Wake up other AioContexts. */
Alex Bennée12e97002016-10-27 16:10:14 +01001378 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
Paolo Bonzini6b8f0182017-03-02 19:56:40 +01001379 qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
Alex Bennée12e97002016-10-27 16:10:14 +01001380 }
1381 }
1382}
1383
Alex Bennée05248382017-03-29 16:46:59 +01001384static void prepare_icount_for_run(CPUState *cpu)
1385{
1386 if (use_icount) {
Alex Bennéeeda5f7c2017-04-05 12:35:48 +01001387 int insns_left;
Alex Bennée05248382017-03-29 16:46:59 +01001388
1389 /* These should always be cleared by process_icount_data after
 1390 * each vCPU execution. However, u16.high can be raised
 1391 * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt.
1392 */
Richard Henderson5e140192019-03-28 11:54:23 -10001393 g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
Alex Bennée05248382017-03-29 16:46:59 +01001394 g_assert(cpu->icount_extra == 0);
1395
Alex Bennéeeda5f7c2017-04-05 12:35:48 +01001396 cpu->icount_budget = tcg_get_icount_limit();
1397 insns_left = MIN(0xffff, cpu->icount_budget);
Richard Henderson5e140192019-03-28 11:54:23 -10001398 cpu_neg(cpu)->icount_decr.u16.low = insns_left;
Alex Bennéeeda5f7c2017-04-05 12:35:48 +01001399 cpu->icount_extra = cpu->icount_budget - insns_left;
Alex Bennéed759c952018-02-27 12:52:48 +03001400
1401 replay_mutex_lock();
Alex Bennée05248382017-03-29 16:46:59 +01001402 }
1403}
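
/*
 * Illustrative arithmetic (hypothetical budget, not from the original
 * file): prepare_icount_for_run() splits the budget between the 16-bit
 * decrementer and icount_extra.  For a budget of 100000 instructions:
 *
 *     insns_left   = MIN(0xffff, 100000) = 65535  ->  icount_decr.u16.low
 *     icount_extra = 100000 - 65535      = 34465
 *
 * When u16.low reaches zero, the TB execution loop can refill it from
 * icount_extra, so the vCPU never runs past the budget computed by
 * tcg_get_icount_limit().
 */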
1404
1405static void process_icount_data(CPUState *cpu)
1406{
1407 if (use_icount) {
Alex Bennéee4cd9652017-03-31 16:09:42 +01001408 /* Account for executed instructions */
Alex Bennée512d3c82017-04-05 12:32:37 +01001409 cpu_update_icount(cpu);
Alex Bennée05248382017-03-29 16:46:59 +01001410
1411 /* Reset the counters */
Richard Henderson5e140192019-03-28 11:54:23 -10001412 cpu_neg(cpu)->icount_decr.u16.low = 0;
Alex Bennée05248382017-03-29 16:46:59 +01001413 cpu->icount_extra = 0;
Alex Bennéee4cd9652017-03-31 16:09:42 +01001414 cpu->icount_budget = 0;
1415
Alex Bennée05248382017-03-29 16:46:59 +01001416 replay_account_executed_instructions();
Alex Bennéed759c952018-02-27 12:52:48 +03001417
1418 replay_mutex_unlock();
Alex Bennée05248382017-03-29 16:46:59 +01001419 }
1420}
1421
1422
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001423static int tcg_cpu_exec(CPUState *cpu)
1424{
1425 int ret;
1426#ifdef CONFIG_PROFILER
1427 int64_t ti;
1428#endif
1429
Emilio G. Cotaf28d0df2018-06-22 13:45:31 -04001430 assert(tcg_enabled());
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001431#ifdef CONFIG_PROFILER
1432 ti = profile_getclock();
1433#endif
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001434 cpu_exec_start(cpu);
1435 ret = cpu_exec(cpu);
1436 cpu_exec_end(cpu);
1437#ifdef CONFIG_PROFILER
Emilio G. Cota72fd2ef2018-10-10 10:48:53 -04001438 atomic_set(&tcg_ctx->prof.cpu_exec_time,
1439 tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001440#endif
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001441 return ret;
1442}
1443
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001444/* Destroy any remaining vCPUs which have been unplugged and have
1445 * finished running
1446 */
1447static void deal_with_unplugged_cpus(void)
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001448{
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001449 CPUState *cpu;
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001450
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001451 CPU_FOREACH(cpu) {
1452 if (cpu->unplug && !cpu_can_run(cpu)) {
1453 qemu_tcg_destroy_vcpu(cpu);
1454 cpu->created = false;
1455 qemu_cond_signal(&qemu_cpu_cond);
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001456 break;
1457 }
1458 }
Alex Bennée1be7fcb2016-10-27 16:10:08 +01001459}
Jan Kiszkabdb7ca62011-09-26 09:40:39 +02001460
Alex Bennée65467062017-02-23 18:29:09 +00001461/* Single-threaded TCG
1462 *
1463 * In the single-threaded case each vCPU is simulated in turn. If
 1464 * there is more than a single vCPU, we create a simple timer to kick
 1465 * the vCPU so we don't get stuck in a tight loop in one vCPU.
 1466 * This is done explicitly rather than relying on side-effects
 1467 * elsewhere; see the illustrative kick-timer sketch after this function.
1468 */
1469
Alex Bennée37257942017-02-23 18:29:14 +00001470static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
Blue Swirl296af7c2010-03-29 19:23:50 +00001471{
Andreas Färberc3586ba2012-05-03 01:41:24 +02001472 CPUState *cpu = arg;
Blue Swirl296af7c2010-03-29 19:23:50 +00001473
Emilio G. Cotaf28d0df2018-06-22 13:45:31 -04001474 assert(tcg_enabled());
Paolo Bonziniab28bd22015-07-09 08:55:38 +02001475 rcu_register_thread();
Emilio G. Cota3468b592017-07-19 18:57:58 -04001476 tcg_register_thread();
Paolo Bonziniab28bd22015-07-09 08:55:38 +02001477
Paolo Bonzini2e7f7a32015-06-18 18:47:18 +02001478 qemu_mutex_lock_iothread();
Andreas Färber814e6122012-05-02 17:00:37 +02001479 qemu_thread_get_self(cpu->thread);
Blue Swirl296af7c2010-03-29 19:23:50 +00001480
David Hildenbrand5a9c9732018-02-09 20:52:39 +01001481 cpu->thread_id = qemu_get_thread_id();
1482 cpu->created = true;
1483 cpu->can_do_io = 1;
Blue Swirl296af7c2010-03-29 19:23:50 +00001484 qemu_cond_signal(&qemu_cpu_cond);
Richard Henderson9c09a252019-03-14 13:06:29 -07001485 qemu_guest_random_seed_thread_part2(cpu->random_seed);
Blue Swirl296af7c2010-03-29 19:23:50 +00001486
Jan Kiszkafa7d1862011-08-22 18:35:25 +02001487 /* wait for initial kick-off after machine start */
Emilio G. Cotac28e3992015-04-27 12:45:28 -04001488 while (first_cpu->stopped) {
KONRAD Fredericd5f8d612015-08-10 17:27:06 +02001489 qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
Jan Kiszka8e564b42012-02-17 18:31:15 +01001490
1491 /* process any pending work */
Andreas Färberbdc44642013-06-24 23:50:24 +02001492 CPU_FOREACH(cpu) {
Alex Bennée37257942017-02-23 18:29:14 +00001493 current_cpu = cpu;
Andreas Färber182735e2013-05-29 22:29:20 +02001494 qemu_wait_io_event_common(cpu);
Jan Kiszka8e564b42012-02-17 18:31:15 +01001495 }
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001496 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001497
Alex Bennée65467062017-02-23 18:29:09 +00001498 start_tcg_kick_timer();
1499
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001500 cpu = first_cpu;
1501
Alex Bennéee5143e32017-02-23 18:29:12 +00001502 /* process any pending work */
1503 cpu->exit_request = 1;
1504
Blue Swirl296af7c2010-03-29 19:23:50 +00001505 while (1) {
Alex Bennéed759c952018-02-27 12:52:48 +03001506 qemu_mutex_unlock_iothread();
1507 replay_mutex_lock();
1508 qemu_mutex_lock_iothread();
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001509 /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
1510 qemu_account_warp_timer();
1511
Paolo Bonzini6b8f0182017-03-02 19:56:40 +01001512 /* Run the timers here. This is much more efficient than
1513 * waking up the I/O thread and waiting for completion.
1514 */
1515 handle_icount_deadline();
1516
Alex Bennéed759c952018-02-27 12:52:48 +03001517 replay_mutex_unlock();
1518
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001519 if (!cpu) {
1520 cpu = first_cpu;
1521 }
1522
Alex Bennéee5143e32017-02-23 18:29:12 +00001523 while (cpu && !cpu->queued_work_first && !cpu->exit_request) {
1524
Alex Bennée791158d2017-02-23 18:29:10 +00001525 atomic_mb_set(&tcg_current_rr_cpu, cpu);
Alex Bennée37257942017-02-23 18:29:14 +00001526 current_cpu = cpu;
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001527
1528 qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
1529 (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
1530
1531 if (cpu_can_run(cpu)) {
1532 int r;
Alex Bennée05248382017-03-29 16:46:59 +01001533
Alex Bennéed759c952018-02-27 12:52:48 +03001534 qemu_mutex_unlock_iothread();
Alex Bennée05248382017-03-29 16:46:59 +01001535 prepare_icount_for_run(cpu);
1536
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001537 r = tcg_cpu_exec(cpu);
Alex Bennée05248382017-03-29 16:46:59 +01001538
1539 process_icount_data(cpu);
Alex Bennéed759c952018-02-27 12:52:48 +03001540 qemu_mutex_lock_iothread();
Alex Bennée05248382017-03-29 16:46:59 +01001541
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001542 if (r == EXCP_DEBUG) {
1543 cpu_handle_guest_debug(cpu);
1544 break;
Pranith Kumar08e73c42017-02-23 18:29:15 +00001545 } else if (r == EXCP_ATOMIC) {
1546 qemu_mutex_unlock_iothread();
1547 cpu_exec_step_atomic(cpu);
1548 qemu_mutex_lock_iothread();
1549 break;
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001550 }
Alex Bennée37257942017-02-23 18:29:14 +00001551 } else if (cpu->stop) {
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001552 if (cpu->unplug) {
1553 cpu = CPU_NEXT(cpu);
1554 }
1555 break;
1556 }
1557
Alex Bennéee5143e32017-02-23 18:29:12 +00001558 cpu = CPU_NEXT(cpu);
 1559 } /* while (cpu && !cpu->queued_work_first && !cpu->exit_request) */
1560
Alex Bennée791158d2017-02-23 18:29:10 +00001561 /* Does not need atomic_mb_set because a spurious wakeup is okay. */
1562 atomic_set(&tcg_current_rr_cpu, NULL);
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001563
Alex Bennéee5143e32017-02-23 18:29:12 +00001564 if (cpu && cpu->exit_request) {
1565 atomic_mb_set(&cpu->exit_request, 0);
1566 }
Alex Blighac70aaf2013-08-21 16:02:57 +01001567
Clement Deschamps013aabd2018-10-21 16:21:03 +02001568 if (use_icount && all_cpu_threads_idle()) {
1569 /*
 1570 * When all CPUs are sleeping (e.g. in WFI), to avoid a deadlock
1571 * in the main_loop, wake it up in order to start the warp timer.
1572 */
1573 qemu_notify_event();
1574 }
1575
Paolo Bonzinia8efa602018-11-14 12:36:57 +01001576 qemu_tcg_rr_wait_io_event();
Alex Bennéec93bbbe2016-10-27 16:10:09 +01001577 deal_with_unplugged_cpus();
Blue Swirl296af7c2010-03-29 19:23:50 +00001578 }
1579
Paolo Bonzini9b0605f2018-01-30 11:05:06 -05001580 rcu_unregister_thread();
Blue Swirl296af7c2010-03-29 19:23:50 +00001581 return NULL;
1582}
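
/*
 * Illustrative sketch of the kick timer mentioned in the "Single-threaded
 * TCG" comment above.  It approximates the kick-timer helpers defined
 * earlier in this file; the example_* names are hypothetical.
 */
static QEMUTimer *example_kick_timer;

static void example_kick_tcg_thread(void *opaque)
{
    /* Re-arm roughly ten times a second... */
    timer_mod(example_kick_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND / 10);
    /* ...and poke whichever vCPU the round-robin loop is running, so a
     * guest busy-loop on one vCPU cannot monopolise the TCG thread. */
    qemu_cpu_kick_rr_cpu();
}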
1583
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001584static void *qemu_hax_cpu_thread_fn(void *arg)
1585{
1586 CPUState *cpu = arg;
1587 int r;
Vincent Palatinb3d3a422017-03-20 11:15:49 +01001588
Paolo Bonzini9857c2d2018-01-30 16:28:49 +01001589 rcu_register_thread();
Vincent Palatinb3d3a422017-03-20 11:15:49 +01001590 qemu_mutex_lock_iothread();
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001591 qemu_thread_get_self(cpu->thread);
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001592
1593 cpu->thread_id = qemu_get_thread_id();
1594 cpu->created = true;
1595 cpu->halted = 0;
1596 current_cpu = cpu;
1597
1598 hax_init_vcpu(cpu);
1599 qemu_cond_signal(&qemu_cpu_cond);
Richard Henderson9c09a252019-03-14 13:06:29 -07001600 qemu_guest_random_seed_thread_part2(cpu->random_seed);
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001601
Paolo Bonzini9857c2d2018-01-30 16:28:49 +01001602 do {
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001603 if (cpu_can_run(cpu)) {
1604 r = hax_smp_cpu_exec(cpu);
1605 if (r == EXCP_DEBUG) {
1606 cpu_handle_guest_debug(cpu);
1607 }
1608 }
1609
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001610 qemu_wait_io_event(cpu);
Paolo Bonzini9857c2d2018-01-30 16:28:49 +01001611 } while (!cpu->unplug || cpu_can_run(cpu));
1612 rcu_unregister_thread();
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001613 return NULL;
1614}
1615
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05001616/* The HVF-specific vCPU thread function. This one should only run when the host
1617 * CPU supports the VMX "unrestricted guest" feature. */
1618static void *qemu_hvf_cpu_thread_fn(void *arg)
1619{
1620 CPUState *cpu = arg;
1621
1622 int r;
1623
1624 assert(hvf_enabled());
1625
1626 rcu_register_thread();
1627
1628 qemu_mutex_lock_iothread();
1629 qemu_thread_get_self(cpu->thread);
1630
1631 cpu->thread_id = qemu_get_thread_id();
1632 cpu->can_do_io = 1;
1633 current_cpu = cpu;
1634
1635 hvf_init_vcpu(cpu);
1636
1637 /* signal CPU creation */
1638 cpu->created = true;
1639 qemu_cond_signal(&qemu_cpu_cond);
Richard Henderson9c09a252019-03-14 13:06:29 -07001640 qemu_guest_random_seed_thread_part2(cpu->random_seed);
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05001641
1642 do {
1643 if (cpu_can_run(cpu)) {
1644 r = hvf_vcpu_exec(cpu);
1645 if (r == EXCP_DEBUG) {
1646 cpu_handle_guest_debug(cpu);
1647 }
1648 }
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001649 qemu_wait_io_event(cpu);
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05001650 } while (!cpu->unplug || cpu_can_run(cpu));
1651
1652 hvf_vcpu_destroy(cpu);
1653 cpu->created = false;
1654 qemu_cond_signal(&qemu_cpu_cond);
1655 qemu_mutex_unlock_iothread();
Paolo Bonzini8178e632018-01-30 11:05:21 -05001656 rcu_unregister_thread();
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05001657 return NULL;
1658}
1659
Justin Terry (VM)19306802018-01-22 13:07:49 -08001660static void *qemu_whpx_cpu_thread_fn(void *arg)
1661{
1662 CPUState *cpu = arg;
1663 int r;
1664
1665 rcu_register_thread();
1666
1667 qemu_mutex_lock_iothread();
1668 qemu_thread_get_self(cpu->thread);
1669 cpu->thread_id = qemu_get_thread_id();
1670 current_cpu = cpu;
1671
1672 r = whpx_init_vcpu(cpu);
1673 if (r < 0) {
1674 fprintf(stderr, "whpx_init_vcpu failed: %s\n", strerror(-r));
1675 exit(1);
1676 }
1677
1678 /* signal CPU creation */
1679 cpu->created = true;
1680 qemu_cond_signal(&qemu_cpu_cond);
Richard Henderson9c09a252019-03-14 13:06:29 -07001681 qemu_guest_random_seed_thread_part2(cpu->random_seed);
Justin Terry (VM)19306802018-01-22 13:07:49 -08001682
1683 do {
1684 if (cpu_can_run(cpu)) {
1685 r = whpx_vcpu_exec(cpu);
1686 if (r == EXCP_DEBUG) {
1687 cpu_handle_guest_debug(cpu);
1688 }
1689 }
1690 while (cpu_thread_is_idle(cpu)) {
1691 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
1692 }
1693 qemu_wait_io_event_common(cpu);
1694 } while (!cpu->unplug || cpu_can_run(cpu));
1695
1696 whpx_destroy_vcpu(cpu);
1697 cpu->created = false;
1698 qemu_cond_signal(&qemu_cpu_cond);
1699 qemu_mutex_unlock_iothread();
1700 rcu_unregister_thread();
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001701 return NULL;
1702}
1703
1704#ifdef _WIN32
1705static void CALLBACK dummy_apc_func(ULONG_PTR unused)
1706{
1707}
1708#endif
1709
Alex Bennée37257942017-02-23 18:29:14 +00001710/* Multi-threaded TCG
1711 *
1712 * In the multi-threaded case each vCPU has its own thread. The TLS
1713 * variable current_cpu can be used deep in the code to find the
1714 * current CPUState for a given thread.
1715 */
1716
1717static void *qemu_tcg_cpu_thread_fn(void *arg)
1718{
1719 CPUState *cpu = arg;
1720
Emilio G. Cotaf28d0df2018-06-22 13:45:31 -04001721 assert(tcg_enabled());
Alex Bennéebf51c722017-03-30 18:32:29 +01001722 g_assert(!use_icount);
1723
Alex Bennée37257942017-02-23 18:29:14 +00001724 rcu_register_thread();
Emilio G. Cota3468b592017-07-19 18:57:58 -04001725 tcg_register_thread();
Alex Bennée37257942017-02-23 18:29:14 +00001726
1727 qemu_mutex_lock_iothread();
1728 qemu_thread_get_self(cpu->thread);
1729
1730 cpu->thread_id = qemu_get_thread_id();
1731 cpu->created = true;
1732 cpu->can_do_io = 1;
1733 current_cpu = cpu;
1734 qemu_cond_signal(&qemu_cpu_cond);
Richard Henderson9c09a252019-03-14 13:06:29 -07001735 qemu_guest_random_seed_thread_part2(cpu->random_seed);
Alex Bennée37257942017-02-23 18:29:14 +00001736
1737 /* process any pending work */
1738 cpu->exit_request = 1;
1739
Cédric Le Goater54961aa2018-04-25 15:18:28 +02001740 do {
Alex Bennée37257942017-02-23 18:29:14 +00001741 if (cpu_can_run(cpu)) {
1742 int r;
Alex Bennéed759c952018-02-27 12:52:48 +03001743 qemu_mutex_unlock_iothread();
Alex Bennée37257942017-02-23 18:29:14 +00001744 r = tcg_cpu_exec(cpu);
Alex Bennéed759c952018-02-27 12:52:48 +03001745 qemu_mutex_lock_iothread();
Alex Bennée37257942017-02-23 18:29:14 +00001746 switch (r) {
1747 case EXCP_DEBUG:
1748 cpu_handle_guest_debug(cpu);
1749 break;
1750 case EXCP_HALTED:
 1751 /* During start-up the vCPU is reset and the thread is
 1752 * kicked several times. If we don't ensure we go back
 1753 * to sleep in the halted state, we won't start up
 1754 * cleanly when the vCPU is enabled.
 1755 *
 1756 * cpu->halted should ensure we sleep in qemu_wait_io_event().
1757 */
1758 g_assert(cpu->halted);
1759 break;
Pranith Kumar08e73c42017-02-23 18:29:15 +00001760 case EXCP_ATOMIC:
1761 qemu_mutex_unlock_iothread();
1762 cpu_exec_step_atomic(cpu);
1763 qemu_mutex_lock_iothread();
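                /* fall through */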
Alex Bennée37257942017-02-23 18:29:14 +00001764 default:
1765 /* Ignore everything else? */
1766 break;
1767 }
1768 }
1769
Alex Bennée37257942017-02-23 18:29:14 +00001770 atomic_mb_set(&cpu->exit_request, 0);
Paolo Bonzinidb08b682018-01-11 13:53:12 +01001771 qemu_wait_io_event(cpu);
Paolo Bonzini9b0605f2018-01-30 11:05:06 -05001772 } while (!cpu->unplug || cpu_can_run(cpu));
Alex Bennée37257942017-02-23 18:29:14 +00001773
Paolo Bonzini9b0605f2018-01-30 11:05:06 -05001774 qemu_tcg_destroy_vcpu(cpu);
1775 cpu->created = false;
1776 qemu_cond_signal(&qemu_cpu_cond);
1777 qemu_mutex_unlock_iothread();
1778 rcu_unregister_thread();
Alex Bennée37257942017-02-23 18:29:14 +00001779 return NULL;
1780}
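
/*
 * Illustrative sketch (hypothetical helper, not in the original file):
 * because current_cpu is thread-local, code deep inside an MTTCG vCPU
 * thread can recover its own CPUState without any parameter plumbing.
 * qemu_in_vcpu_thread() below relies on the same idiom.
 */
static inline CPUState *example_this_vcpu(void)
{
    g_assert(current_cpu && qemu_cpu_is_self(current_cpu));
    return current_cpu;
}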
1781
Andreas Färber2ff09a42012-05-03 00:23:30 +02001782static void qemu_cpu_kick_thread(CPUState *cpu)
Paolo Bonzinicc015e92011-03-12 17:44:08 +01001783{
1784#ifndef _WIN32
1785 int err;
1786
Paolo Bonzinie0c38212015-08-26 00:19:19 +02001787 if (cpu->thread_kicked) {
1788 return;
Paolo Bonzini9102ded2015-08-18 06:52:09 -07001789 }
Paolo Bonzinie0c38212015-08-26 00:19:19 +02001790 cpu->thread_kicked = true;
Andreas Färber814e6122012-05-02 17:00:37 +02001791 err = pthread_kill(cpu->thread->thread, SIG_IPI);
Laurent Vivierd455ebc2019-01-02 15:16:03 +01001792 if (err && err != ESRCH) {
Paolo Bonzinicc015e92011-03-12 17:44:08 +01001793 fprintf(stderr, "qemu:%s: %s\n", __func__, strerror(err));
1794 exit(1);
1795 }
1796#else /* _WIN32 */
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001797 if (!qemu_cpu_is_self(cpu)) {
Justin Terry (VM)19306802018-01-22 13:07:49 -08001798 if (whpx_enabled()) {
1799 whpx_vcpu_kick(cpu);
1800 } else if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) {
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001801 fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n",
1802 __func__, GetLastError());
1803 exit(1);
1804 }
1805 }
Paolo Bonzinicc015e92011-03-12 17:44:08 +01001806#endif
1807}
1808
Andreas Färberc08d7422012-05-03 04:34:15 +02001809void qemu_cpu_kick(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001810{
Andreas Färberf5c121b2012-05-03 01:22:49 +02001811 qemu_cond_broadcast(cpu->halt_cond);
Paolo Bonzinie0c38212015-08-26 00:19:19 +02001812 if (tcg_enabled()) {
Alex Bennée791158d2017-02-23 18:29:10 +00001813 cpu_exit(cpu);
Alex Bennée37257942017-02-23 18:29:14 +00001814 /* NOP unless doing single-thread RR */
Alex Bennée791158d2017-02-23 18:29:10 +00001815 qemu_cpu_kick_rr_cpu();
Paolo Bonzinie0c38212015-08-26 00:19:19 +02001816 } else {
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01001817 if (hax_enabled()) {
1818 /*
1819 * FIXME: race condition with the exit_request check in
1820 * hax_vcpu_hax_exec
1821 */
1822 cpu->exit_request = 1;
1823 }
Paolo Bonzinie0c38212015-08-26 00:19:19 +02001824 qemu_cpu_kick_thread(cpu);
1825 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001826}
1827
Jan Kiszka46d62fa2011-02-01 22:15:59 +01001828void qemu_cpu_kick_self(void)
1829{
Andreas Färber4917cf42013-05-27 05:17:50 +02001830 assert(current_cpu);
Paolo Bonzini9102ded2015-08-18 06:52:09 -07001831 qemu_cpu_kick_thread(current_cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001832}
1833
Andreas Färber60e82572012-05-02 22:23:49 +02001834bool qemu_cpu_is_self(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001835{
Andreas Färber814e6122012-05-02 17:00:37 +02001836 return qemu_thread_is_self(cpu->thread);
Blue Swirl296af7c2010-03-29 19:23:50 +00001837}
1838
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01001839bool qemu_in_vcpu_thread(void)
Juan Quintelaaa723c22012-09-18 16:30:11 +02001840{
Andreas Färber4917cf42013-05-27 05:17:50 +02001841 return current_cpu && qemu_cpu_is_self(current_cpu);
Juan Quintelaaa723c22012-09-18 16:30:11 +02001842}
1843
Paolo Bonziniafbe7052015-06-18 18:47:19 +02001844static __thread bool iothread_locked = false;
1845
1846bool qemu_mutex_iothread_locked(void)
1847{
1848 return iothread_locked;
1849}
1850
Emilio G. Cotacb764d02017-10-28 02:16:41 -04001851/*
1852 * The BQL is taken from so many places that it is worth profiling the
1853 * callers directly, instead of funneling them all through a single function.
1854 */
1855void qemu_mutex_lock_iothread_impl(const char *file, int line)
Blue Swirl296af7c2010-03-29 19:23:50 +00001856{
Emilio G. Cotacb764d02017-10-28 02:16:41 -04001857 QemuMutexLockFunc bql_lock = atomic_read(&qemu_bql_mutex_lock_func);
1858
Jan Kiszka8d04fb52017-02-23 18:29:11 +00001859 g_assert(!qemu_mutex_iothread_locked());
Emilio G. Cotacb764d02017-10-28 02:16:41 -04001860 bql_lock(&qemu_global_mutex, file, line);
Paolo Bonziniafbe7052015-06-18 18:47:19 +02001861 iothread_locked = true;
Blue Swirl296af7c2010-03-29 19:23:50 +00001862}
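
/*
 * For reference, a sketch of how callers reach the _impl function above:
 * the qemu_mutex_lock_iothread() macro (in include/qemu/main-loop.h)
 * records each call site, which is what makes per-caller BQL profiling
 * possible:
 *
 *     #define qemu_mutex_lock_iothread() \
 *         qemu_mutex_lock_iothread_impl(__FILE__, __LINE__)
 */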
1863
1864void qemu_mutex_unlock_iothread(void)
1865{
Jan Kiszka8d04fb52017-02-23 18:29:11 +00001866 g_assert(qemu_mutex_iothread_locked());
Paolo Bonziniafbe7052015-06-18 18:47:19 +02001867 iothread_locked = false;
Blue Swirl296af7c2010-03-29 19:23:50 +00001868 qemu_mutex_unlock(&qemu_global_mutex);
1869}
1870
Alex Bennéee8faee02016-10-27 16:09:58 +01001871static bool all_vcpus_paused(void)
Blue Swirl296af7c2010-03-29 19:23:50 +00001872{
Andreas Färberbdc44642013-06-24 23:50:24 +02001873 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001874
Andreas Färberbdc44642013-06-24 23:50:24 +02001875 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001876 if (!cpu->stopped) {
Alex Bennéee8faee02016-10-27 16:09:58 +01001877 return false;
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001878 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001879 }
1880
Alex Bennéee8faee02016-10-27 16:09:58 +01001881 return true;
Blue Swirl296af7c2010-03-29 19:23:50 +00001882}
1883
1884void pause_all_vcpus(void)
1885{
Andreas Färberbdc44642013-06-24 23:50:24 +02001886 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001887
Alex Bligh40daca52013-08-21 16:03:02 +01001888 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
Andreas Färberbdc44642013-06-24 23:50:24 +02001889 CPU_FOREACH(cpu) {
David Hildenbrandebd05fe2017-11-29 20:12:15 +01001890 if (qemu_cpu_is_self(cpu)) {
1891 qemu_cpu_stop(cpu, true);
1892 } else {
1893 cpu->stop = true;
1894 qemu_cpu_kick(cpu);
1895 }
Jan Kiszkad798e972012-02-17 18:31:16 +01001896 }
1897
Alex Bennéed759c952018-02-27 12:52:48 +03001898 /* We need to drop the replay_lock so any vCPU threads woken up
1899 * can finish their replay tasks
1900 */
1901 replay_mutex_unlock();
1902
Blue Swirl296af7c2010-03-29 19:23:50 +00001903 while (!all_vcpus_paused()) {
Paolo Bonzinibe7d6c52011-03-12 17:44:02 +01001904 qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
Andreas Färberbdc44642013-06-24 23:50:24 +02001905 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001906 qemu_cpu_kick(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001907 }
1908 }
Alex Bennéed759c952018-02-27 12:52:48 +03001909
1910 qemu_mutex_unlock_iothread();
1911 replay_mutex_lock();
1912 qemu_mutex_lock_iothread();
Blue Swirl296af7c2010-03-29 19:23:50 +00001913}
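
/*
 * Usage sketch (illustrative): a caller that needs the guest quiescent,
 * e.g. do_vm_stop() referenced below, brackets its critical section as
 *
 *     pause_all_vcpus();
 *     ... all vCPUs are stopped; guest state can be inspected safely ...
 *     resume_all_vcpus();
 */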
1914
Igor Mammedov29936832013-04-23 10:29:37 +02001915void cpu_resume(CPUState *cpu)
1916{
1917 cpu->stop = false;
1918 cpu->stopped = false;
1919 qemu_cpu_kick(cpu);
1920}
1921
Blue Swirl296af7c2010-03-29 19:23:50 +00001922void resume_all_vcpus(void)
1923{
Andreas Färberbdc44642013-06-24 23:50:24 +02001924 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001925
Alex Bligh40daca52013-08-21 16:03:02 +01001926 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
Andreas Färberbdc44642013-06-24 23:50:24 +02001927 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001928 cpu_resume(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001929 }
1930}
1931
Paolo Bonzinidbadee42018-01-30 16:40:12 +01001932void cpu_remove_sync(CPUState *cpu)
Gu Zheng4c055ab2016-05-12 09:18:13 +05301933{
1934 cpu->stop = true;
1935 cpu->unplug = true;
1936 qemu_cpu_kick(cpu);
Paolo Bonzinidbadee42018-01-30 16:40:12 +01001937 qemu_mutex_unlock_iothread();
1938 qemu_thread_join(cpu->thread);
1939 qemu_mutex_lock_iothread();
Bharata B Rao2c579042016-05-12 09:18:14 +05301940}
1941
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001942/* For temporary buffers for forming a name */
1943#define VCPU_THREAD_NAME_SIZE 16
1944
Andreas Färbere5ab30a2012-05-03 01:50:44 +02001945static void qemu_tcg_init_vcpu(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001946{
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001947 char thread_name[VCPU_THREAD_NAME_SIZE];
Alex Bennée37257942017-02-23 18:29:14 +00001948 static QemuCond *single_tcg_halt_cond;
1949 static QemuThread *single_tcg_cpu_thread;
Emilio G. Cotae8feb962017-07-07 19:24:20 -04001950 static int tcg_region_inited;
1951
Emilio G. Cotaf28d0df2018-06-22 13:45:31 -04001952 assert(tcg_enabled());
Emilio G. Cotae8feb962017-07-07 19:24:20 -04001953 /*
1954 * Initialize TCG regions--once. Now is a good time, because:
1955 * (1) TCG's init context, prologue and target globals have been set up.
1956 * (2) qemu_tcg_mttcg_enabled() works now (TCG init code runs before the
1957 * -accel flag is processed, so the check doesn't work then).
1958 */
1959 if (!tcg_region_inited) {
1960 tcg_region_inited = 1;
1961 tcg_region_init();
1962 }
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001963
Alex Bennée37257942017-02-23 18:29:14 +00001964 if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
Andreas Färber814e6122012-05-02 17:00:37 +02001965 cpu->thread = g_malloc0(sizeof(QemuThread));
Andreas Färberf5c121b2012-05-03 01:22:49 +02001966 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1967 qemu_cond_init(cpu->halt_cond);
Alex Bennée37257942017-02-23 18:29:14 +00001968
1969 if (qemu_tcg_mttcg_enabled()) {
1970 /* create a thread per vCPU with TCG (MTTCG) */
1971 parallel_cpus = true;
1972 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001973 cpu->cpu_index);
Alex Bennée37257942017-02-23 18:29:14 +00001974
1975 qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
1976 cpu, QEMU_THREAD_JOINABLE);
1977
1978 } else {
1979 /* share a single thread for all cpus with TCG */
1980 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
1981 qemu_thread_create(cpu->thread, thread_name,
1982 qemu_tcg_rr_cpu_thread_fn,
1983 cpu, QEMU_THREAD_JOINABLE);
1984
1985 single_tcg_halt_cond = cpu->halt_cond;
1986 single_tcg_cpu_thread = cpu->thread;
1987 }
Paolo Bonzini1ecf47b2011-12-13 13:43:52 +01001988#ifdef _WIN32
Andreas Färber814e6122012-05-02 17:00:37 +02001989 cpu->hThread = qemu_thread_get_handle(cpu->thread);
Paolo Bonzini1ecf47b2011-12-13 13:43:52 +01001990#endif
Blue Swirl296af7c2010-03-29 19:23:50 +00001991 } else {
Alex Bennée37257942017-02-23 18:29:14 +00001992 /* For non-MTTCG cases we share the thread */
1993 cpu->thread = single_tcg_cpu_thread;
1994 cpu->halt_cond = single_tcg_halt_cond;
David Hildenbranda3421732018-02-09 20:52:37 +01001995 cpu->thread_id = first_cpu->thread_id;
1996 cpu->can_do_io = 1;
1997 cpu->created = true;
Blue Swirl296af7c2010-03-29 19:23:50 +00001998 }
1999}
2000
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01002001static void qemu_hax_start_vcpu(CPUState *cpu)
2002{
2003 char thread_name[VCPU_THREAD_NAME_SIZE];
2004
2005 cpu->thread = g_malloc0(sizeof(QemuThread));
2006 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
2007 qemu_cond_init(cpu->halt_cond);
2008
2009 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX",
2010 cpu->cpu_index);
2011 qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn,
2012 cpu, QEMU_THREAD_JOINABLE);
2013#ifdef _WIN32
2014 cpu->hThread = qemu_thread_get_handle(cpu->thread);
2015#endif
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01002016}
2017
Andreas Färber48a106b2013-05-27 02:20:39 +02002018static void qemu_kvm_start_vcpu(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00002019{
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00002020 char thread_name[VCPU_THREAD_NAME_SIZE];
2021
Andreas Färber814e6122012-05-02 17:00:37 +02002022 cpu->thread = g_malloc0(sizeof(QemuThread));
Andreas Färberf5c121b2012-05-03 01:22:49 +02002023 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
2024 qemu_cond_init(cpu->halt_cond);
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00002025 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
2026 cpu->cpu_index);
2027 qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
2028 cpu, QEMU_THREAD_JOINABLE);
Blue Swirl296af7c2010-03-29 19:23:50 +00002029}
2030
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05002031static void qemu_hvf_start_vcpu(CPUState *cpu)
2032{
2033 char thread_name[VCPU_THREAD_NAME_SIZE];
2034
2035 /* HVF currently does not support TCG, and only runs in
2036 * unrestricted-guest mode. */
2037 assert(hvf_enabled());
2038
2039 cpu->thread = g_malloc0(sizeof(QemuThread));
2040 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
2041 qemu_cond_init(cpu->halt_cond);
2042
2043 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
2044 cpu->cpu_index);
2045 qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn,
2046 cpu, QEMU_THREAD_JOINABLE);
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05002047}
2048
Justin Terry (VM)19306802018-01-22 13:07:49 -08002049static void qemu_whpx_start_vcpu(CPUState *cpu)
2050{
2051 char thread_name[VCPU_THREAD_NAME_SIZE];
2052
2053 cpu->thread = g_malloc0(sizeof(QemuThread));
2054 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
2055 qemu_cond_init(cpu->halt_cond);
2056 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX",
2057 cpu->cpu_index);
2058 qemu_thread_create(cpu->thread, thread_name, qemu_whpx_cpu_thread_fn,
2059 cpu, QEMU_THREAD_JOINABLE);
2060#ifdef _WIN32
2061 cpu->hThread = qemu_thread_get_handle(cpu->thread);
2062#endif
Justin Terry (VM)19306802018-01-22 13:07:49 -08002063}
2064
Andreas Färber10a90212013-05-27 02:24:35 +02002065static void qemu_dummy_start_vcpu(CPUState *cpu)
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02002066{
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00002067 char thread_name[VCPU_THREAD_NAME_SIZE];
2068
Andreas Färber814e6122012-05-02 17:00:37 +02002069 cpu->thread = g_malloc0(sizeof(QemuThread));
Andreas Färberf5c121b2012-05-03 01:22:49 +02002070 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
2071 qemu_cond_init(cpu->halt_cond);
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00002072 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
2073 cpu->cpu_index);
2074 qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02002075 QEMU_THREAD_JOINABLE);
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02002076}
2077
Andreas Färberc643bed2013-05-27 03:23:24 +02002078void qemu_init_vcpu(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00002079{
Andreas Färberce3960e2012-12-17 03:27:07 +01002080 cpu->nr_cores = smp_cores;
2081 cpu->nr_threads = smp_threads;
Andreas Färberf324e762012-05-02 23:26:21 +02002082 cpu->stopped = true;
Richard Henderson9c09a252019-03-14 13:06:29 -07002083 cpu->random_seed = qemu_guest_random_seed_thread_part1();
Peter Maydell56943e82016-01-21 14:15:04 +00002084
2085 if (!cpu->as) {
2086 /* If the target cpu hasn't set up any address spaces itself,
2087 * give it the default one.
2088 */
Peter Maydell12ebc9a2016-01-21 14:15:04 +00002089 cpu->num_ases = 1;
Peter Xu80ceb072017-11-23 17:23:32 +08002090 cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
Peter Maydell56943e82016-01-21 14:15:04 +00002091 }
2092
Jan Kiszka0ab07c62011-02-07 12:19:14 +01002093 if (kvm_enabled()) {
Andreas Färber48a106b2013-05-27 02:20:39 +02002094 qemu_kvm_start_vcpu(cpu);
Vincent Palatinb0cb0a62017-01-10 11:59:57 +01002095 } else if (hax_enabled()) {
2096 qemu_hax_start_vcpu(cpu);
Sergio Andres Gomez Del Realc97d6d22017-09-13 04:05:09 -05002097 } else if (hvf_enabled()) {
2098 qemu_hvf_start_vcpu(cpu);
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02002099 } else if (tcg_enabled()) {
Andreas Färbere5ab30a2012-05-03 01:50:44 +02002100 qemu_tcg_init_vcpu(cpu);
Justin Terry (VM)19306802018-01-22 13:07:49 -08002101 } else if (whpx_enabled()) {
2102 qemu_whpx_start_vcpu(cpu);
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02002103 } else {
Andreas Färber10a90212013-05-27 02:24:35 +02002104 qemu_dummy_start_vcpu(cpu);
Jan Kiszka0ab07c62011-02-07 12:19:14 +01002105 }
David Hildenbrand81e96312018-02-09 20:52:38 +01002106
2107 while (!cpu->created) {
2108 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
2109 }
Blue Swirl296af7c2010-03-29 19:23:50 +00002110}
2111
Jan Kiszkab4a3d962011-02-01 22:15:43 +01002112void cpu_stop_current(void)
Blue Swirl296af7c2010-03-29 19:23:50 +00002113{
Andreas Färber4917cf42013-05-27 05:17:50 +02002114 if (current_cpu) {
Peter Maydell0ec7e672019-01-07 15:23:47 +00002115 current_cpu->stop = true;
2116 cpu_exit(current_cpu);
Jan Kiszkab4a3d962011-02-01 22:15:43 +01002117 }
Blue Swirl296af7c2010-03-29 19:23:50 +00002118}
2119
Kevin Wolf56983462013-07-05 13:49:54 +02002120int vm_stop(RunState state)
Blue Swirl296af7c2010-03-29 19:23:50 +00002121{
Juan Quintelaaa723c22012-09-18 16:30:11 +02002122 if (qemu_in_vcpu_thread()) {
Paolo Bonzini74892d22014-06-05 14:53:58 +02002123 qemu_system_vmstop_request_prepare();
Luiz Capitulino1dfb4dd2011-07-29 14:26:33 -03002124 qemu_system_vmstop_request(state);
Blue Swirl296af7c2010-03-29 19:23:50 +00002125 /*
2126 * FIXME: should not return to device code in case
2127 * vm_stop() has been requested.
2128 */
Jan Kiszkab4a3d962011-02-01 22:15:43 +01002129 cpu_stop_current();
Kevin Wolf56983462013-07-05 13:49:54 +02002130 return 0;
Blue Swirl296af7c2010-03-29 19:23:50 +00002131 }
Kevin Wolf56983462013-07-05 13:49:54 +02002132
Stefan Hajnoczi4486e892018-03-07 14:42:05 +00002133 return do_vm_stop(state, true);
Blue Swirl296af7c2010-03-29 19:23:50 +00002134}
2135
Claudio Imbrenda2d76e822017-02-14 18:07:47 +01002136/**
2137 * Prepare for (re)starting the VM.
2138 * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
2139 * running or in case of an error condition), 0 otherwise.
2140 */
2141int vm_prepare_start(void)
2142{
2143 RunState requested;
Claudio Imbrenda2d76e822017-02-14 18:07:47 +01002144
2145 qemu_vmstop_requested(&requested);
2146 if (runstate_is_running() && requested == RUN_STATE__MAX) {
2147 return -1;
2148 }
2149
2150 /* Ensure that a STOP/RESUME pair of events is emitted if a
 2151 * vmstop request was pending. The BLOCK_IO_ERROR event, for
 2152 * example, is documented to always be followed by the STOP
 2153 * event.
2154 */
2155 if (runstate_is_running()) {
Peter Xu3ab72382018-08-15 21:37:37 +08002156 qapi_event_send_stop();
2157 qapi_event_send_resume();
Markus Armbrusterf0561582018-04-23 10:45:18 +02002158 return -1;
Claudio Imbrenda2d76e822017-02-14 18:07:47 +01002159 }
2160
 2161 /* We are sending this now, but the CPUs will be resumed shortly */
Peter Xu3ab72382018-08-15 21:37:37 +08002162 qapi_event_send_resume();
Markus Armbrusterf0561582018-04-23 10:45:18 +02002163
2164 replay_enable_events();
2165 cpu_enable_ticks();
2166 runstate_set(RUN_STATE_RUNNING);
2167 vm_state_notify(1, RUN_STATE_RUNNING);
2168 return 0;
Claudio Imbrenda2d76e822017-02-14 18:07:47 +01002169}
2170
2171void vm_start(void)
2172{
2173 if (!vm_prepare_start()) {
2174 resume_all_vcpus();
2175 }
2176}
2177
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03002178/* Does a state transition even if the VM is already stopped;
 2179 the current state is forgotten forever. */
Kevin Wolf56983462013-07-05 13:49:54 +02002180int vm_stop_force_state(RunState state)
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03002181{
2182 if (runstate_is_running()) {
Kevin Wolf56983462013-07-05 13:49:54 +02002183 return vm_stop(state);
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03002184 } else {
2185 runstate_set(state);
Wen Congyangb2780d32015-11-20 17:34:38 +08002186
2187 bdrv_drain_all();
Kevin Wolf594a45c2013-07-18 14:52:19 +02002188 /* Make sure to return an error if the flush in a previous vm_stop()
2189 * failed. */
John Snow22af08e2016-09-22 21:45:51 -04002190 return bdrv_flush_all();
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03002191 }
2192}
2193
Markus Armbruster04424282019-04-17 21:17:57 +02002194void list_cpus(const char *optarg)
Blue Swirl262353c2010-05-04 19:55:35 +00002195{
 2196 /* XXX: implement xxx_cpu_list for targets that still lack it */
Peter Maydelle916cbf2012-09-05 17:41:08 -03002197#if defined(cpu_list)
Markus Armbruster04424282019-04-17 21:17:57 +02002198 cpu_list();
Blue Swirl262353c2010-05-04 19:55:35 +00002199#endif
2200}
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002201
2202CpuInfoList *qmp_query_cpus(Error **errp)
2203{
Igor Mammedovafed5a52017-05-10 13:29:55 +02002204 MachineState *ms = MACHINE(qdev_get_machine());
2205 MachineClass *mc = MACHINE_GET_CLASS(ms);
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002206 CpuInfoList *head = NULL, *cur_item = NULL;
Andreas Färber182735e2013-05-29 22:29:20 +02002207 CPUState *cpu;
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002208
Andreas Färberbdc44642013-06-24 23:50:24 +02002209 CPU_FOREACH(cpu) {
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002210 CpuInfoList *info;
Andreas Färber182735e2013-05-29 22:29:20 +02002211#if defined(TARGET_I386)
2212 X86CPU *x86_cpu = X86_CPU(cpu);
2213 CPUX86State *env = &x86_cpu->env;
2214#elif defined(TARGET_PPC)
2215 PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
2216 CPUPPCState *env = &ppc_cpu->env;
2217#elif defined(TARGET_SPARC)
2218 SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
2219 CPUSPARCState *env = &sparc_cpu->env;
Michael Clark25fa1942018-03-03 01:32:59 +13002220#elif defined(TARGET_RISCV)
2221 RISCVCPU *riscv_cpu = RISCV_CPU(cpu);
2222 CPURISCVState *env = &riscv_cpu->env;
Andreas Färber182735e2013-05-29 22:29:20 +02002223#elif defined(TARGET_MIPS)
2224 MIPSCPU *mips_cpu = MIPS_CPU(cpu);
2225 CPUMIPSState *env = &mips_cpu->env;
Bastian Koppelmann48e06fe2014-09-01 12:59:46 +01002226#elif defined(TARGET_TRICORE)
2227 TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
2228 CPUTriCoreState *env = &tricore_cpu->env;
Viktor Mihajlovski9d0306d2018-02-16 17:08:37 +01002229#elif defined(TARGET_S390X)
2230 S390CPU *s390_cpu = S390_CPU(cpu);
2231 CPUS390XState *env = &s390_cpu->env;
Andreas Färber182735e2013-05-29 22:29:20 +02002232#endif
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002233
Andreas Färbercb446ec2013-05-01 14:24:52 +02002234 cpu_synchronize_state(cpu);
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002235
2236 info = g_malloc0(sizeof(*info));
2237 info->value = g_malloc0(sizeof(*info->value));
Andreas Färber55e5c282012-12-17 06:18:02 +01002238 info->value->CPU = cpu->cpu_index;
Andreas Färber182735e2013-05-29 22:29:20 +02002239 info->value->current = (cpu == first_cpu);
Andreas Färber259186a2013-01-17 18:51:17 +01002240 info->value->halted = cpu->halted;
Eduardo Habkost58f88d42015-05-08 16:04:22 -03002241 info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
Andreas Färber9f09e182012-05-03 06:59:07 +02002242 info->value->thread_id = cpu->thread_id;
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002243#if defined(TARGET_I386)
Eric Blake86f4b682015-11-18 01:52:59 -07002244 info->value->arch = CPU_INFO_ARCH_X86;
Eric Blake544a3732016-02-17 23:48:27 -07002245 info->value->u.x86.pc = env->eip + env->segs[R_CS].base;
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002246#elif defined(TARGET_PPC)
Eric Blake86f4b682015-11-18 01:52:59 -07002247 info->value->arch = CPU_INFO_ARCH_PPC;
Eric Blake544a3732016-02-17 23:48:27 -07002248 info->value->u.ppc.nip = env->nip;
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002249#elif defined(TARGET_SPARC)
Eric Blake86f4b682015-11-18 01:52:59 -07002250 info->value->arch = CPU_INFO_ARCH_SPARC;
Eric Blake544a3732016-02-17 23:48:27 -07002251 info->value->u.q_sparc.pc = env->pc;
2252 info->value->u.q_sparc.npc = env->npc;
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002253#elif defined(TARGET_MIPS)
Eric Blake86f4b682015-11-18 01:52:59 -07002254 info->value->arch = CPU_INFO_ARCH_MIPS;
Eric Blake544a3732016-02-17 23:48:27 -07002255 info->value->u.q_mips.PC = env->active_tc.PC;
Bastian Koppelmann48e06fe2014-09-01 12:59:46 +01002256#elif defined(TARGET_TRICORE)
Eric Blake86f4b682015-11-18 01:52:59 -07002257 info->value->arch = CPU_INFO_ARCH_TRICORE;
Eric Blake544a3732016-02-17 23:48:27 -07002258 info->value->u.tricore.PC = env->PC;
Viktor Mihajlovski9d0306d2018-02-16 17:08:37 +01002259#elif defined(TARGET_S390X)
2260 info->value->arch = CPU_INFO_ARCH_S390;
2261 info->value->u.s390.cpu_state = env->cpu_state;
Michael Clark25fa1942018-03-03 01:32:59 +13002262#elif defined(TARGET_RISCV)
2263 info->value->arch = CPU_INFO_ARCH_RISCV;
2264 info->value->u.riscv.pc = env->pc;
Eric Blake86f4b682015-11-18 01:52:59 -07002265#else
2266 info->value->arch = CPU_INFO_ARCH_OTHER;
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002267#endif
Igor Mammedovafed5a52017-05-10 13:29:55 +02002268 info->value->has_props = !!mc->cpu_index_to_instance_props;
2269 if (info->value->has_props) {
2270 CpuInstanceProperties *props;
2271 props = g_malloc0(sizeof(*props));
2272 *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
2273 info->value->props = props;
2274 }
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03002275
2276 /* XXX: waiting for the qapi to support GSList */
2277 if (!cur_item) {
2278 head = cur_item = info;
2279 } else {
2280 cur_item->next = info;
2281 cur_item = info;
2282 }
2283 }
2284
2285 return head;
2286}
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002287
Laszlo Ersekdaa9d2b2018-04-27 21:28:51 +02002288static CpuInfoArch sysemu_target_to_cpuinfo_arch(SysEmuTarget target)
2289{
2290 /*
2291 * The @SysEmuTarget -> @CpuInfoArch mapping below is based on the
2292 * TARGET_ARCH -> TARGET_BASE_ARCH mapping in the "configure" script.
2293 */
2294 switch (target) {
2295 case SYS_EMU_TARGET_I386:
2296 case SYS_EMU_TARGET_X86_64:
2297 return CPU_INFO_ARCH_X86;
2298
2299 case SYS_EMU_TARGET_PPC:
Laszlo Ersekdaa9d2b2018-04-27 21:28:51 +02002300 case SYS_EMU_TARGET_PPC64:
2301 return CPU_INFO_ARCH_PPC;
2302
2303 case SYS_EMU_TARGET_SPARC:
2304 case SYS_EMU_TARGET_SPARC64:
2305 return CPU_INFO_ARCH_SPARC;
2306
2307 case SYS_EMU_TARGET_MIPS:
2308 case SYS_EMU_TARGET_MIPSEL:
2309 case SYS_EMU_TARGET_MIPS64:
2310 case SYS_EMU_TARGET_MIPS64EL:
2311 return CPU_INFO_ARCH_MIPS;
2312
2313 case SYS_EMU_TARGET_TRICORE:
2314 return CPU_INFO_ARCH_TRICORE;
2315
2316 case SYS_EMU_TARGET_S390X:
2317 return CPU_INFO_ARCH_S390;
2318
2319 case SYS_EMU_TARGET_RISCV32:
2320 case SYS_EMU_TARGET_RISCV64:
2321 return CPU_INFO_ARCH_RISCV;
2322
2323 default:
2324 return CPU_INFO_ARCH_OTHER;
2325 }
2326}
2327
2328static void cpustate_to_cpuinfo_s390(CpuInfoS390 *info, const CPUState *cpu)
2329{
2330#ifdef TARGET_S390X
2331 S390CPU *s390_cpu = S390_CPU(cpu);
2332 CPUS390XState *env = &s390_cpu->env;
2333
2334 info->cpu_state = env->cpu_state;
2335#else
2336 abort();
2337#endif
2338}
2339
Luiz Capitulinoce74ee32018-02-16 17:08:38 +01002340/*
2341 * fast means: we NEVER interrupt vCPU threads to retrieve
2342 * information from KVM.
2343 */
2344CpuInfoFastList *qmp_query_cpus_fast(Error **errp)
2345{
2346 MachineState *ms = MACHINE(qdev_get_machine());
2347 MachineClass *mc = MACHINE_GET_CLASS(ms);
2348 CpuInfoFastList *head = NULL, *cur_item = NULL;
Laszlo Ersekdaa9d2b2018-04-27 21:28:51 +02002349 SysEmuTarget target = qapi_enum_parse(&SysEmuTarget_lookup, TARGET_NAME,
2350 -1, &error_abort);
Luiz Capitulinoce74ee32018-02-16 17:08:38 +01002351 CPUState *cpu;
2352
2353 CPU_FOREACH(cpu) {
2354 CpuInfoFastList *info = g_malloc0(sizeof(*info));
2355 info->value = g_malloc0(sizeof(*info->value));
2356
2357 info->value->cpu_index = cpu->cpu_index;
2358 info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
2359 info->value->thread_id = cpu->thread_id;
2360
2361 info->value->has_props = !!mc->cpu_index_to_instance_props;
2362 if (info->value->has_props) {
2363 CpuInstanceProperties *props;
2364 props = g_malloc0(sizeof(*props));
2365 *props = mc->cpu_index_to_instance_props(ms, cpu->cpu_index);
2366 info->value->props = props;
2367 }
2368
Laszlo Ersekdaa9d2b2018-04-27 21:28:51 +02002369 info->value->arch = sysemu_target_to_cpuinfo_arch(target);
2370 info->value->target = target;
2371 if (target == SYS_EMU_TARGET_S390X) {
2372 cpustate_to_cpuinfo_s390(&info->value->u.s390x, cpu);
Laszlo Ersekdaa9d2b2018-04-27 21:28:51 +02002373 }
2374
Luiz Capitulinoce74ee32018-02-16 17:08:38 +01002375 if (!cur_item) {
2376 head = cur_item = info;
2377 } else {
2378 cur_item->next = info;
2379 cur_item = info;
2380 }
2381 }
2382
2383 return head;
2384}
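
/*
 * Example QMP exchange (illustrative; the returned values are made up):
 *
 *   -> { "execute": "query-cpus-fast" }
 *   <- { "return": [ { "cpu-index": 0,
 *                      "qom-path": "/machine/unattached/device[0]",
 *                      "thread-id": 25627,
 *                      "arch": "x86",
 *                      "target": "x86_64" } ] }
 *
 * Unlike qmp_query_cpus() above, this never calls cpu_synchronize_state(),
 * so management software can poll it frequently without interrupting
 * running vCPUs.
 */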
2385
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002386void qmp_memsave(int64_t addr, int64_t size, const char *filename,
2387 bool has_cpu, int64_t cpu_index, Error **errp)
2388{
2389 FILE *f;
2390 uint32_t l;
Andreas Färber55e5c282012-12-17 06:18:02 +01002391 CPUState *cpu;
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002392 uint8_t buf[1024];
Borislav Petkov0dc9daf2015-02-08 13:14:38 +01002393 int64_t orig_addr = addr, orig_size = size;
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002394
2395 if (!has_cpu) {
2396 cpu_index = 0;
2397 }
2398
Andreas Färber151d1322013-02-15 15:41:49 +01002399 cpu = qemu_get_cpu(cpu_index);
2400 if (cpu == NULL) {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +01002401 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
2402 "a CPU number");
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002403 return;
2404 }
2405
2406 f = fopen(filename, "wb");
2407 if (!f) {
Luiz Capitulino618da852013-06-07 14:35:06 -04002408 error_setg_file_open(errp, errno, filename);
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002409 return;
2410 }
2411
2412 while (size != 0) {
2413 l = sizeof(buf);
 2414        if (l > size) {
 2415            l = size;
        }
Aneesh Kumar K.V2f4d0f52013-10-01 21:49:30 +05302416 if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
Borislav Petkov0dc9daf2015-02-08 13:14:38 +01002417 error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
2418 " specified", orig_addr, orig_size);
Aneesh Kumar K.V2f4d0f52013-10-01 21:49:30 +05302419 goto exit;
2420 }
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002421 if (fwrite(buf, 1, l, f) != l) {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +01002422 error_setg(errp, QERR_IO_ERROR);
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02002423 goto exit;
2424 }
2425 addr += l;
2426 size -= l;
2427 }
2428
2429exit:
2430 fclose(f);
2431}
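
/*
 * Example QMP usage (illustrative values; "val" is the guest virtual
 * address to dump, while pmemsave below takes a physical address):
 *
 *   -> { "execute": "memsave",
 *        "arguments": { "val": 4096, "size": 4096,
 *                       "filename": "/tmp/virtual-mem-dump",
 *                       "cpu-index": 0 } }
 *   <- { "return": {} }
 */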
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02002432
2433void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
2434 Error **errp)
2435{
2436 FILE *f;
2437 uint32_t l;
2438 uint8_t buf[1024];
2439
2440 f = fopen(filename, "wb");
2441 if (!f) {
Luiz Capitulino618da852013-06-07 14:35:06 -04002442 error_setg_file_open(errp, errno, filename);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02002443 return;
2444 }
2445
2446 while (size != 0) {
2447 l = sizeof(buf);
 2448        if (l > size) {
 2449            l = size;
        }
Stefan Weileb6282f2014-04-07 20:28:23 +02002450 cpu_physical_memory_read(addr, buf, l);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02002451 if (fwrite(buf, 1, l, f) != l) {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +01002452 error_setg(errp, QERR_IO_ERROR);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02002453 goto exit;
2454 }
2455 addr += l;
2456 size -= l;
2457 }
2458
2459exit:
2460 fclose(f);
2461}
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02002462
2463void qmp_inject_nmi(Error **errp)
2464{
Alexey Kardashevskiy9cb805f2014-08-20 22:16:33 +10002465 nmi_monitor_handle(monitor_get_cpu_index(), errp);
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02002466}
Sebastian Tanase27498be2014-07-25 11:56:33 +02002467
Markus Armbruster76c86612019-04-17 21:17:53 +02002468void dump_drift_info(void)
Sebastian Tanase27498be2014-07-25 11:56:33 +02002469{
2470 if (!use_icount) {
2471 return;
2472 }
2473
Markus Armbruster76c86612019-04-17 21:17:53 +02002474 qemu_printf("Host - Guest clock %"PRIi64" ms\n",
Sebastian Tanase27498be2014-07-25 11:56:33 +02002475 (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
2476 if (icount_align_option) {
Markus Armbruster76c86612019-04-17 21:17:53 +02002477 qemu_printf("Max guest delay %"PRIi64" ms\n",
2478 -max_delay / SCALE_MS);
2479 qemu_printf("Max guest advance %"PRIi64" ms\n",
2480 max_advance / SCALE_MS);
Sebastian Tanase27498be2014-07-25 11:56:33 +02002481 } else {
Markus Armbruster76c86612019-04-17 21:17:53 +02002482 qemu_printf("Max guest delay NA\n");
2483 qemu_printf("Max guest advance NA\n");
Sebastian Tanase27498be2014-07-25 11:56:33 +02002484 }
2485}