/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "qemu/osdep.h"

#include "monitor/monitor.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
#include "qapi-event.h"
#include "hw/nmi.h"
#include "sysemu/replay.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;
int64_t max_delay;
int64_t max_advance;

/* vcpu throttling controls */
static QEMUTimer *throttle_timer;
static unsigned int throttle_percentage;

#define CPU_THROTTLE_PCT_MIN 1
#define CPU_THROTTLE_PCT_MAX 99
#define CPU_THROTTLE_TIMESLICE_NS 10000000

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

static bool icount_sleep = true;
static int64_t vm_clock_warp_start = -1;
/* Conversion factor from emulated instructions to virtual clock ticks.  */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10

static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;

typedef struct TimersState {
    /* Protected by BQL.  */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read out of BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;

    /* Compensate for varying guest execution speed.  */
    int64_t qemu_icount_bias;
    /* Only written by TCG thread */
    int64_t qemu_icount;
} TimersState;

static TimersState timers_state;

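/* Return the raw instruction counter: the number of instructions the
 * current TCG vCPU has actually executed.  cpu->icount_decr.u16.low and
 * cpu->icount_extra hold the not-yet-executed remainder of the current
 * execution budget, so they are subtracted from the accumulated count.
 */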
int64_t cpu_get_icount_raw(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = timers_state.qemu_icount;
    if (cpu) {
        if (!cpu->can_do_io) {
            fprintf(stderr, "Bad icount read\n");
            exit(1);
        }
        icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
    }
    return icount;
}

/* Return the virtual CPU time, based on the instruction counter.  */
static int64_t cpu_get_icount_locked(void)
{
    int64_t icount = cpu_get_icount_raw();
    return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
}

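/* Lock-free reader of the icount-based clock: retry the read whenever a
 * writer (e.g. icount_adjust() or icount_warp_rt()) raced with us on
 * vm_clock_seqlock.
 */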
int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}

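/* One emulated instruction accounts for 2^icount_time_shift ns of virtual
 * time; e.g. a shift of 3 means 8 ns per instruction, i.e. 125 MIPS.
 */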
int64_t cpu_icount_to_ns(int64_t icount)
{
    return icount << icount_time_shift;
}

/* return the host CPU cycle counter and handle stop/restart */
/* Caller must hold the BQL */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_host_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Note: non-increasing ticks may happen if the host uses
           software suspend.  */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}

static int64_t cpu_get_clock_locked(void)
{
    int64_t ticks;

    ticks = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += get_clock();
    }

    return ticks;
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

/* enable cpu_get_ticks()
 * Caller must hold the BQL, which serves as the mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, the only thing actually protected by the seqlock is
     * cpu_clock_offset.  */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* disable cpu_get_ticks(): the clock is stopped.  You must not call
 * cpu_get_ticks() after that.
 * Caller must hold the BQL, which serves as the mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, the only thing actually protected by the seqlock is
     * cpu_clock_offset.  */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_host_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

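/* Feedback loop that retunes icount_time_shift while the VM runs: a
 * positive delta means virtual time has run ahead of real time, so the
 * shift is lowered (fewer ns per instruction); a negative delta raises it.
 * qemu_icount_bias is then recomputed so the clock stays continuous at the
 * adjustment point.
 */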
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex.  */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    timers_state.qemu_icount_bias = cur_icount
                              - (timers_state.qemu_icount << icount_time_shift);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
    icount_adjust();
}

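/* Round a nanosecond interval up to a whole number of instructions at the
 * current icount_time_shift.
 */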
static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

static void icount_warp_rt(void)
{
    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    if (atomic_read(&vm_clock_warp_start) == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT,
                                     cpu_get_clock_locked());
        int64_t warp_delta;

        warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = clock - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        timers_state.qemu_icount_bias += warp_delta;
    }
    vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static void icount_timer_cb(void *opaque)
{
    /* No need for a checkpoint because the timer already synchronizes
     * with CHECKPOINT_CLOCK_VIRTUAL_RT.
     */
    icount_warp_rt();
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    AioContext *aio_context;
    assert(qtest_enabled());
    aio_context = qemu_get_aio_context();
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);

        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        timers_state.qemu_icount_bias += warp;
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}

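/* Arrange for QEMU_CLOCK_VIRTUAL to jump forward when every vCPU is idle:
 * with sleep=off the clock is warped to the next deadline immediately,
 * otherwise icount_warp_timer fires once the equivalent amount of
 * QEMU_CLOCK_VIRTUAL_RT (real) time has passed.
 */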
void qemu_start_warp_timer(void)
{
    int64_t clock;
    int64_t deadline;

    if (!use_icount) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    /* warp clock deterministically in record/replay mode */
    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
        return;
    }

    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        static bool notified;
        if (!icount_sleep && !notified) {
            error_report("WARNING: icount sleep disabled and no active timers");
            notified = true;
        }
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance
         * QEMU_CLOCK_VIRTUAL.
         */
        if (!icount_sleep) {
            /*
             * We never let VCPUs sleep in no-sleep icount mode.
             * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
             * to the next QEMU_CLOCK_VIRTUAL event and notify it.
             * This is useful when we want a deterministic execution time,
             * isolated from host latencies.
             */
            seqlock_write_lock(&timers_state.vm_clock_seqlock);
            timers_state.qemu_icount_bias += deadline;
            seqlock_write_unlock(&timers_state.vm_clock_seqlock);
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
        } else {
            /*
             * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
             * "real" time (related to the time left until the next event) has
             * passed.  The QEMU_CLOCK_VIRTUAL_RT clock will do this.
             * This keeps the warps from being visible externally; for example,
             * you will not be sending network packets continuously instead of
             * every 100ms.
             */
            seqlock_write_lock(&timers_state.vm_clock_seqlock);
            if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
                vm_clock_warp_start = clock;
            }
            seqlock_write_unlock(&timers_state.vm_clock_seqlock);
            timer_mod_anticipate(icount_warp_timer, clock + deadline);
        }
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static void qemu_account_warp_timer(void)
{
    if (!use_icount || !icount_sleep) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    /* warp clock deterministically in record/replay mode */
    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
        return;
    }

    timer_del(icount_warp_timer);
    icount_warp_rt();
}

static bool icount_state_needed(void *opaque)
{
    return use_icount;
}

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icount_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_timers,
        NULL
    }
};

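/* Work item that runs on the throttled vCPU: sleep for
 * pct / (1 - pct) * CPU_THROTTLE_TIMESLICE_NS with the iothread lock
 * dropped.  For example, at 50% the vCPU sleeps 10 ms out of every 20 ms
 * period scheduled by cpu_throttle_timer_tick(), and at 99% it sleeps
 * 990 ms out of every second.
 */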
static void cpu_throttle_thread(void *opaque)
{
    CPUState *cpu = opaque;
    double pct;
    double throttle_ratio;
    long sleeptime_ns;

    if (!cpu_throttle_get_percentage()) {
        return;
    }

    pct = (double)cpu_throttle_get_percentage() / 100;
    throttle_ratio = pct / (1 - pct);
    sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS);

    qemu_mutex_unlock_iothread();
    atomic_set(&cpu->throttle_thread_scheduled, 0);
    g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */
    qemu_mutex_lock_iothread();
}

static void cpu_throttle_timer_tick(void *opaque)
{
    CPUState *cpu;
    double pct;

    /* Stop the timer if needed */
    if (!cpu_throttle_get_percentage()) {
        return;
    }
    CPU_FOREACH(cpu) {
        if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
            async_run_on_cpu(cpu, cpu_throttle_thread, cpu);
        }
    }

    pct = (double)cpu_throttle_get_percentage() / 100;
    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
              CPU_THROTTLE_TIMESLICE_NS / (1 - pct));
}

void cpu_throttle_set(int new_throttle_pct)
{
    /* Ensure throttle percentage is within valid range */
    new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
    new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);

    atomic_set(&throttle_percentage, new_throttle_pct);

    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
              CPU_THROTTLE_TIMESLICE_NS);
}

void cpu_throttle_stop(void)
{
    atomic_set(&throttle_percentage, 0);
}

bool cpu_throttle_active(void)
{
    return (cpu_throttle_get_percentage() != 0);
}

int cpu_throttle_get_percentage(void)
{
    return atomic_read(&throttle_percentage);
}

void cpu_ticks_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock, NULL);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                  cpu_throttle_timer_tick, NULL);
}

void configure_icount(QemuOpts *opts, Error **errp)
{
    const char *option;
    char *rem_str = NULL;

    option = qemu_opt_get(opts, "shift");
    if (!option) {
        if (qemu_opt_get(opts, "align") != NULL) {
            error_setg(errp, "Please specify shift option when using align");
        }
        return;
    }

    icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
    if (icount_sleep) {
        icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                         icount_timer_cb, NULL);
    }

    icount_align_option = qemu_opt_get_bool(opts, "align", false);

    if (icount_align_option && !icount_sleep) {
        error_setg(errp, "align=on and sleep=off are incompatible");
    }
    if (strcmp(option, "auto") != 0) {
        errno = 0;
        icount_time_shift = strtol(option, &rem_str, 0);
        if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
            error_setg(errp, "icount: Invalid shift value");
        }
        use_icount = 1;
        return;
    } else if (icount_align_option) {
        error_setg(errp, "shift=auto and align=on are incompatible");
    } else if (!icount_sleep) {
        error_setg(errp, "shift=auto and sleep=off are incompatible");
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                                   icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        qapi_event_send_stop(&error_abort);
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}
#endif /* _WIN32 */

static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static unsigned iothread_requesting_mutex;

static QemuThread io_thread;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

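/* Run func(data) on the given vCPU's thread and block until it completes.
 * The work item lives on this stack frame; qemu_cond_wait() drops the BQL
 * while waiting, so other threads can make progress in the meantime.
 */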
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;

    qemu_mutex_lock(&cpu->work_mutex);
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
    while (!atomic_mb_read(&wi.done)) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}

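/* Asynchronous variant of run_on_cpu(): the caller does not block.  The
 * work item is heap-allocated and freed by flush_queued_work() after the
 * callback has run.
 */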
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;

    qemu_mutex_lock(&cpu->work_mutex);
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
}

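/* Drain the vCPU's queued work list.  work_mutex is dropped around each
 * callback so that a callback may itself queue further work.
 */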
static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    qemu_mutex_lock(&cpu->work_mutex);
    while (cpu->queued_work_first != NULL) {
        wi = cpu->queued_work_first;
        cpu->queued_work_first = wi->next;
        if (!cpu->queued_work_first) {
            cpu->queued_work_last = NULL;
        }
        qemu_mutex_unlock(&cpu->work_mutex);
        wi->func(wi->data);
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            atomic_mb_set(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_broadcast(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(CPUState *cpu)
{
    while (all_cpu_threads_idle()) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}

static void tcg_exec_all(void);

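/* All TCG vCPUs are multiplexed onto this single host thread (see
 * qemu_tcg_init_vcpu() below); tcg_exec_all(), defined later in this file,
 * is expected to run each runnable vCPU in turn.
 */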
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
        cpu->can_do_io = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    /* process any pending work */
    atomic_mb_set(&exit_request, 1);

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event(QTAILQ_FIRST(&cpus));
    }

    return NULL;
}

static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    if (cpu->thread_kicked) {
        return;
    }
    cpu->thread_kicked = true;
    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    abort();
#endif
}

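/* Force the vCPU currently executing TCG code out of the translated-code
 * loop without broadcasting its halt condition variable.
 */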
static void qemu_cpu_kick_no_halt(void)
{
    CPUState *cpu;
    /* Ensure whatever caused the exit has reached the CPU threads before
     * writing exit_request.
     */
    atomic_mb_set(&exit_request, 1);
    cpu = atomic_mb_read(&tcg_current_cpu);
    if (cpu) {
        cpu_exit(cpu);
    }
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (tcg_enabled()) {
        qemu_cpu_kick_no_halt();
    } else {
        qemu_cpu_kick_thread(cpu);
    }
}

void qemu_cpu_kick_self(void)
{
    assert(current_cpu);
    qemu_cpu_kick_thread(current_cpu);
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

static __thread bool iothread_locked = false;

bool qemu_mutex_iothread_locked(void)
{
    return iothread_locked;
}

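/* Acquire the BQL.  When a TCG vCPU holds it, simply blocking on the mutex
 * could wait for a long translated-code run, so first try the lock and, on
 * contention, kick the vCPU out of the TCG loop before blocking.
 */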
void qemu_mutex_lock_iothread(void)
{
    atomic_inc(&iothread_requesting_mutex);
    /* In the simple case there is no need to bump the VCPU thread out of
     * TCG code execution.
     */
    if (!tcg_enabled() || qemu_in_vcpu_thread() ||
        !first_cpu || !first_cpu->created) {
        qemu_mutex_lock(&qemu_global_mutex);
        atomic_dec(&iothread_requesting_mutex);
    } else {
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_no_halt();
            qemu_mutex_lock(&qemu_global_mutex);
        }
        atomic_dec(&iothread_requesting_mutex);
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
    iothread_locked = true;
}

void qemu_mutex_unlock_iothread(void)
{
    iothread_locked = false;
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return 0;
        }
    }

    return 1;
}

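/* Ask every vCPU to stop and wait until all of them have paused.  When
 * called from a vCPU thread without KVM, the CPUs are simply marked
 * stopped: all TCG vCPUs run in the calling thread itself, so there is
 * nothing to wait for.
 */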
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}

/* Size of the temporary buffer used to form a vCPU thread name */
#define VCPU_THREAD_NAME_SIZE 16

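/* All TCG vCPUs are multiplexed onto one shared host thread, so the
 * thread and its halt condition variable are created for the first CPU
 * and reused for every later one.
 */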
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];
    static QemuCond *tcg_halt_cond;
    static QemuThread *tcg_cpu_thread;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                 cpu->cpu_index);
        qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
                           cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

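/* Common vCPU bring-up: fill in the SMP topology, give the CPU a default
 * address space if the target did not install one, and hand it to the
 * accelerator-specific start function.
 */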
void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;

    if (!cpu->as) {
        /* If the target cpu hasn't set up any address spaces itself,
         * give it the default one.
         */
        AddressSpace *as = address_space_init_shareable(cpu->memory,
                                                        "cpu-memory");
        cpu->num_ases = 1;
        cpu_address_space_init(cpu, as, 0);
    }

    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}

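/* Mark the vCPU that the caller runs on as stopped, leave the execution
 * loop, and wake anyone waiting on the pause condition.
 */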
void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_broadcast(&qemu_pause_cond);
    }
}

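/* Stop the VM.  From a vCPU thread the stop is only requested: the main
 * loop completes the transition, and the calling vCPU merely stops itself.
 */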
int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}

/* Performs the state transition even if the VM is already stopped;
 * the current state is forgotten forever. */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);

        bdrv_drain_all();
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}

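/* Compute how many instructions the next TCG slice may execute before
 * the nearest QEMU_CLOCK_VIRTUAL deadline.  In replay mode the budget
 * comes from the recorded log instead.
 */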
static int64_t tcg_get_icount_limit(void)
{
    int64_t deadline;

    if (replay_mode != REPLAY_MODE_PLAY) {
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        return qemu_icount_round(deadline);
    } else {
        return replay_get_instructions();
    }
}

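/* Run one execution slice on @cpu.  With -icount, the instruction budget
 * is loaded into icount_decr.u16.low (low 16 bits) and icount_extra (the
 * remainder) before entering the guest, and whatever was not executed is
 * folded back into timers_state.qemu_icount afterwards.
 */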
static int tcg_cpu_exec(CPUState *cpu)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                     + cpu->icount_extra);
        cpu->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        count = tcg_get_icount_limit();
        timers_state.qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        cpu->icount_decr.u16.low = decr;
        cpu->icount_extra = count;
    }
    ret = cpu_exec(cpu);
#ifdef CONFIG_PROFILER
    tcg_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                     + cpu->icount_extra);
        cpu->icount_decr.u32 = 0;
        cpu->icount_extra = 0;
        replay_account_executed_instructions();
    }
    return ret;
}

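/* Round-robin scheduler for the single TCG thread: give each runnable
 * vCPU one slice, resuming from where the previous pass left off, until
 * every CPU has had a turn or an exit is requested.
 */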
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
    qemu_account_warp_timer();

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;

        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }

    /* Pairs with smp_wmb in qemu_cpu_kick.  */
    atomic_mb_set(&exit_request, 0);
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}

CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#elif defined(TARGET_TRICORE)
        TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
        CPUTriCoreState *env = &tricore_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->arch = CPU_INFO_ARCH_X86;
        info->value->u.x86.pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->arch = CPU_INFO_ARCH_PPC;
        info->value->u.ppc.nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->arch = CPU_INFO_ARCH_SPARC;
        info->value->u.q_sparc.pc = env->pc;
        info->value->u.q_sparc.npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->arch = CPU_INFO_ARCH_MIPS;
        info->value->u.q_mips.PC = env->active_tc.PC;
#elif defined(TARGET_TRICORE)
        info->value->arch = CPU_INFO_ARCH_TRICORE;
        info->value->u.tricore.PC = env->PC;
#else
        info->value->arch = CPU_INFO_ARCH_OTHER;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}

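/* QMP memsave: dump @size bytes of guest virtual memory at @addr, as
 * seen by the selected vCPU, to @filename in 1 KiB chunks.
 */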
void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];
    int64_t orig_addr = addr, orig_size = size;

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                   "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
            error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
                             " specified", orig_addr, orig_size);
            goto exit;
        }
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

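/* QMP pmemsave: like memsave, but reads guest physical memory, so no
 * per-CPU virtual-address view is needed.
 */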
void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_physical_memory_read(addr, buf, l);
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

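/* QMP inject-nmi: on x86 deliver the NMI through each CPU's APIC when
 * present, or as a raw CPU_INTERRUPT_NMI otherwise; other targets go
 * through the NMI monitor hook.
 */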
void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);

        if (!cpu->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(cpu->apic_state);
        }
    }
#else
    nmi_monitor_handle(monitor_get_cpu_index(), errp);
#endif
}

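/* Print the drift between the host clock and the guest instruction
 * counter; only meaningful when -icount is active.
 */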
void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
{
    if (!use_icount) {
        return;
    }

    cpu_fprintf(f, "Host - Guest clock %"PRIi64" ms\n",
                (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
    if (icount_align_option) {
        cpu_fprintf(f, "Max guest delay %"PRIi64" ms\n", -max_delay/SCALE_MS);
        cpu_fprintf(f, "Max guest advance %"PRIi64" ms\n", max_advance/SCALE_MS);
    } else {
        cpu_fprintf(f, "Max guest delay NA\n");
        cpu_fprintf(f, "Max guest advance NA\n");
    }
}
1717}