blob: 2385caa2487c33dc126208dd0d92283064daa08e [file] [log] [blame]
Blue Swirl296af7c2010-03-29 19:23:50 +00001/*
2 * QEMU System Emulator
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25/* Needed early for CONFIG_BSD etc. */
26#include "config-host.h"
27
Paolo Bonzini83c90892012-12-17 18:19:49 +010028#include "monitor/monitor.h"
Wenchao Xiaa4e15de2014-06-18 08:43:36 +020029#include "qapi/qmp/qerror.h"
Markus Armbrusterd49b6832015-03-17 18:29:20 +010030#include "qemu/error-report.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010031#include "sysemu/sysemu.h"
Paolo Bonzini022c62c2012-12-17 18:19:49 +010032#include "exec/gdbstub.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010033#include "sysemu/dma.h"
34#include "sysemu/kvm.h"
Luiz Capitulinode0b36b2011-09-21 16:38:35 -030035#include "qmp-commands.h"
Blue Swirl296af7c2010-03-29 19:23:50 +000036
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010037#include "qemu/thread.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010038#include "sysemu/cpus.h"
39#include "sysemu/qtest.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010040#include "qemu/main-loop.h"
41#include "qemu/bitmap.h"
Liu Ping Fancb365642013-09-25 14:20:58 +080042#include "qemu/seqlock.h"
Wenchao Xiaa4e15de2014-06-18 08:43:36 +020043#include "qapi-event.h"
Alexey Kardashevskiy9cb805f2014-08-20 22:16:33 +100044#include "hw/nmi.h"
Pavel Dovgalyuk8b427042015-09-17 19:24:05 +030045#include "sysemu/replay.h"
Jan Kiszka0ff0fc12011-06-23 10:15:55 +020046
47#ifndef _WIN32
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010048#include "qemu/compatfd.h"
Jan Kiszka0ff0fc12011-06-23 10:15:55 +020049#endif
Blue Swirl296af7c2010-03-29 19:23:50 +000050
Jan Kiszka6d9cb732011-02-01 22:15:58 +010051#ifdef CONFIG_LINUX
52
53#include <sys/prctl.h>
54
Marcelo Tosattic0532a72010-10-11 15:31:21 -030055#ifndef PR_MCE_KILL
56#define PR_MCE_KILL 33
57#endif
58
Jan Kiszka6d9cb732011-02-01 22:15:58 +010059#ifndef PR_MCE_KILL_SET
60#define PR_MCE_KILL_SET 1
61#endif
62
63#ifndef PR_MCE_KILL_EARLY
64#define PR_MCE_KILL_EARLY 1
65#endif
66
67#endif /* CONFIG_LINUX */
68
/* Cursor into the vCPU list; presumably tracks which CPU the TCG
 * round-robin loop should run next — maintained outside this excerpt. */
static CPUState *next_cpu;
/* Extremes of observed virtual-vs-real time drift; presumably updated
 * by the -icount align machinery elsewhere in this file — confirm. */
int64_t max_delay;
int64_t max_advance;

/* vcpu throttling controls */
static QEMUTimer *throttle_timer;        /* periodic tick on QEMU_CLOCK_VIRTUAL_RT */
static unsigned int throttle_percentage; /* 0 = off, else clamped to MIN..MAX */

#define CPU_THROTTLE_PCT_MIN 1
#define CPU_THROTTLE_PCT_MAX 99
#define CPU_THROTTLE_TIMESLICE_NS 10000000 /* 10 ms throttling period */
80
Tiejun Chen321bc0b2013-08-02 09:43:09 +080081bool cpu_is_stopped(CPUState *cpu)
82{
83 return cpu->stopped || !runstate_is_running();
84}
85
Andreas Färbera98ae1d2013-05-26 23:21:08 +020086static bool cpu_thread_is_idle(CPUState *cpu)
Peter Maydellac873f12012-07-19 16:52:27 +010087{
Andreas Färberc64ca812012-05-03 02:11:45 +020088 if (cpu->stop || cpu->queued_work_first) {
Peter Maydellac873f12012-07-19 16:52:27 +010089 return false;
90 }
Tiejun Chen321bc0b2013-08-02 09:43:09 +080091 if (cpu_is_stopped(cpu)) {
Peter Maydellac873f12012-07-19 16:52:27 +010092 return true;
93 }
Andreas Färber8c2e1b02013-08-25 18:53:55 +020094 if (!cpu->halted || cpu_has_work(cpu) ||
Alexander Graf215e79c2013-04-24 22:24:12 +020095 kvm_halt_in_kernel()) {
Peter Maydellac873f12012-07-19 16:52:27 +010096 return false;
97 }
98 return true;
99}
100
101static bool all_cpu_threads_idle(void)
102{
Andreas Färber182735e2013-05-29 22:29:20 +0200103 CPUState *cpu;
Peter Maydellac873f12012-07-19 16:52:27 +0100104
Andreas Färberbdc44642013-06-24 23:50:24 +0200105 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +0200106 if (!cpu_thread_is_idle(cpu)) {
Peter Maydellac873f12012-07-19 16:52:27 +0100107 return false;
108 }
109 }
110 return true;
111}
112
/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

/* Whether vCPUs are allowed to really sleep in icount mode
 * (-icount sleep=on, the default; see configure_icount). */
static bool icount_sleep = true;
/* Real time at which the current clock warp began, or -1 when no warp
 * is in progress (see icount_warp_rt / qemu_clock_warp). */
static int64_t vm_clock_warp_start = -1;
/* Conversion factor from emulated instructions to virtual clock ticks.  */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10

static QEMUTimer *icount_rt_timer;   /* real-time shift-adjust trigger */
static QEMUTimer *icount_vm_timer;   /* virtual-time shift-adjust trigger */
static QEMUTimer *icount_warp_timer; /* delayed QEMU_CLOCK_VIRTUAL warp */
Paolo Bonzini946fb272011-09-12 13:57:37 +0200128
/* Global timekeeping state shared by the tick/clock/icount accessors. */
typedef struct TimersState {
    /* Protected by BQL.  */
    int64_t cpu_ticks_prev;    /* last value returned by cpu_get_ticks() */
    int64_t cpu_ticks_offset;  /* adjustment applied to the host tick counter */

    /* cpu_clock_offset can be read out of BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;  /* adjustment applied to the host clock */
    int32_t cpu_ticks_enabled; /* non-zero while ticks/clock advance */
    int64_t dummy;             /* unused; still serialized by vmstate_timers */

    /* Compensate for varying guest execution speed.  */
    int64_t qemu_icount_bias;
    /* Only written by TCG thread */
    int64_t qemu_icount;       /* running total of executed instructions */
} TimersState;

static TimersState timers_state;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200149
/* Return the raw executed-instruction count.  The running total in
 * timers_state is corrected by the instructions the current CPU has
 * been budgeted but not yet executed.  Exits fatally when called at a
 * point where an icount read is not allowed (cpu->can_do_io == 0). */
int64_t cpu_get_icount_raw(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = timers_state.qemu_icount;
    if (cpu) {
        if (!cpu->can_do_io) {
            /* Reading icount mid-instruction would be inconsistent. */
            fprintf(stderr, "Bad icount read\n");
            exit(1);
        }
        /* Subtract instructions still pending in the current TB budget. */
        icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
    }
    return icount;
}
165
/* Return the virtual CPU time, based on the instruction counter.
 * Caller must be inside the vm_clock_seqlock protocol (read or write
 * side); the lock-free public wrapper is cpu_get_icount(). */
static int64_t cpu_get_icount_locked(void)
{
    int64_t icount = cpu_get_icount_raw();
    return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
}
172
/* Lock-free public accessor for the icount-based virtual CPU time.
 * Uses the seqlock read protocol: retry while a writer is active. */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}
185
/* Convert an instruction count to nanoseconds of virtual time using
 * the current 2^icount_time_shift ns-per-instruction scale.
 * NOTE(review): left-shifting a negative icount is undefined behavior
 * in C; this relies on two's-complement compiler semantics — confirm
 * callers never pass negative values here. */
int64_t cpu_icount_to_ns(int64_t icount)
{
    return icount << icount_time_shift;
}
190
/* return the host CPU cycle counter and handle stop/restart */
/* Caller must hold the BQL */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    /* In icount mode, "ticks" are defined by executed instructions. */
    if (use_icount) {
        return cpu_get_icount();
    }

    ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_host_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Note: non increasing ticks may happen if the host uses
           software suspend.  Compensate by bumping the offset so the
           returned value never goes backwards. */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}
216
Liu Ping Fancb365642013-09-25 14:20:58 +0800217static int64_t cpu_get_clock_locked(void)
218{
Paolo Bonzini5f3e3102013-10-28 17:32:18 +0100219 int64_t ticks;
Liu Ping Fancb365642013-09-25 14:20:58 +0800220
Paolo Bonzini5f3e3102013-10-28 17:32:18 +0100221 ticks = timers_state.cpu_clock_offset;
222 if (timers_state.cpu_ticks_enabled) {
223 ticks += get_clock();
Liu Ping Fancb365642013-09-25 14:20:58 +0800224 }
225
Paolo Bonzini5f3e3102013-10-28 17:32:18 +0100226 return ticks;
Liu Ping Fancb365642013-09-25 14:20:58 +0800227}
228
/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    /* Seqlock read loop: retry if a writer raced with us. */
    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}
242
/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, the real thing protected by the seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        /* Rebase both offsets on the current host values so guest time
         * appears continuous across the stopped period. */
        timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
257
/* disable cpu_get_ticks() : the clock is stopped.  You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, the real thing protected by the seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        /* Freeze the clocks at their current values by folding the
         * elapsed host time into the offsets. */
        timers_state.cpu_ticks_offset += cpu_get_host_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
273
/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

/* Retune icount_time_shift so that virtual time tracks real time:
 * shrink the shift when the guest runs ahead, grow it when the guest
 * falls behind, then rebase qemu_icount_bias so the virtual clock
 * stays continuous across the shift change. */
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex.  */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    /* Keep the virtual clock value unchanged under the new shift. */
    timers_state.qemu_icount_bias = cur_icount
                              - (timers_state.qemu_icount << icount_time_shift);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
317
/* Timer callback: re-arm the real-time adjustment trigger one second
 * from now, then run a shift adjustment. */
static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_adjust();
}
324
/* Timer callback: re-arm the virtual-time adjustment trigger 100 ms of
 * virtual time from now (get_ticks_per_sec()/10), then adjust. */
static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
                   qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   get_ticks_per_sec() / 10);
    icount_adjust();
}
332
333static int64_t qemu_icount_round(int64_t count)
334{
335 return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
336}
337
/* Warp-timer callback (also invoked directly from qemu_clock_warp):
 * fold the real time elapsed since the warp began into the icount
 * bias, so QEMU_CLOCK_VIRTUAL jumps forward to "now". */
static void icount_warp_rt(void *opaque)
{
    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    if (atomic_read(&vm_clock_warp_start) == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT,
                                     cpu_get_clock_locked());
        int64_t warp_delta;

        warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = clock - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        timers_state.qemu_icount_bias += warp_delta;
    }
    vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);

    /* The warp may have made virtual timers runnable. */
    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
372
/* qtest helper: advance QEMU_CLOCK_VIRTUAL to @dest by repeatedly
 * warping to the next timer deadline and running the timers that
 * become due.  Only valid when qtest is enabled. */
void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    AioContext *aio_context;
    assert(qtest_enabled());
    aio_context = qemu_get_aio_context();
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        /* Never warp past @dest itself. */
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);

        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        timers_state.qemu_icount_bias += warp;
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
393
/* In icount mode, when all vCPUs are idle, warp QEMU_CLOCK_VIRTUAL
 * forward to the next timer deadline — immediately if icount_sleep is
 * off, otherwise after an equivalent amount of real time has passed —
 * so the machine does not sit idle forever waiting for a virtual
 * timer that only fires when instructions execute. */
void qemu_clock_warp(QEMUClockType type)
{
    int64_t clock;
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    if (icount_sleep) {
        /*
         * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
         * This ensures that the deadline for the timer is computed correctly
         * below.
         * This also makes sure that the insn counter is synchronized before
         * the CPU starts running, in case the CPU is woken by an event other
         * than the earliest QEMU_CLOCK_VIRTUAL timer.
         */
        icount_warp_rt(NULL);
        timer_del(icount_warp_timer);
    }
    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        /* No timer pending: with sleep disabled nothing will ever wake
         * the vCPUs again, so warn (once). */
        static bool notified;
        if (!icount_sleep && !notified) {
            error_report("WARNING: icount sleep disabled and no active timers");
            notified = true;
        }
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         */
        if (!icount_sleep) {
            /*
             * We never let VCPUs sleep in no sleep icount mode.
             * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
             * to the next QEMU_CLOCK_VIRTUAL event and notify it.
             * It is useful when we want a deterministic execution time,
             * isolated from host latencies.
             */
            seqlock_write_lock(&timers_state.vm_clock_seqlock);
            timers_state.qemu_icount_bias += deadline;
            seqlock_write_unlock(&timers_state.vm_clock_seqlock);
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
        } else {
            /*
             * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
             * "real" time, (related to the time left until the next event) has
             * passed.  The QEMU_CLOCK_VIRTUAL_RT clock will do this.
             * This avoids that the warps are visible externally; for example,
             * you will not be sending network packets continuously instead of
             * every 100ms.
             */
            seqlock_write_lock(&timers_state.vm_clock_seqlock);
            if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
                vm_clock_warp_start = clock;
            }
            seqlock_write_unlock(&timers_state.vm_clock_seqlock);
            timer_mod_anticipate(icount_warp_timer, clock + deadline);
        }
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
481
KONRAD Fredericd09eae32014-08-01 01:37:10 +0200482static bool icount_state_needed(void *opaque)
483{
484 return use_icount;
485}
486
/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    /* Only included in the stream when icount is in use. */
    .needed = icount_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    }
};
501
/* Migration description for TimersState.  "dummy" is presumably kept
 * to preserve the on-the-wire layout; cpu_clock_offset only exists
 * from stream version 2 onwards. */
static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_timers,
        NULL
    }
};
517
/* Work item run in a vCPU thread: sleep this vCPU for a slice of real
 * time proportional to the throttle percentage, dropping the iothread
 * lock for the duration of the sleep. */
static void cpu_throttle_thread(void *opaque)
{
    CPUState *cpu = opaque;
    double pct;
    double throttle_ratio;
    long sleeptime_ns;

    /* Throttling may have been disabled since this item was queued. */
    if (!cpu_throttle_get_percentage()) {
        return;
    }

    /* sleep/run ratio: e.g. 50% -> sleep one timeslice per timeslice run. */
    pct = (double)cpu_throttle_get_percentage()/100;
    throttle_ratio = pct / (1 - pct);
    sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS);

    qemu_mutex_unlock_iothread();
    /* Clear the flag before sleeping so the timer tick can queue the
     * next round while this one is still asleep. */
    atomic_set(&cpu->throttle_thread_scheduled, 0);
    g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */
    qemu_mutex_lock_iothread();
}
538
/* QEMU_CLOCK_VIRTUAL_RT callback: schedule the throttle work item on
 * every vCPU that does not already have one pending, then re-arm the
 * timer so the guest runs for one timeslice between sleeps. */
static void cpu_throttle_timer_tick(void *opaque)
{
    CPUState *cpu;
    double pct;

    /* Stop the timer if needed */
    if (!cpu_throttle_get_percentage()) {
        return;
    }
    CPU_FOREACH(cpu) {
        /* atomic_xchg returns the previous value: only queue if a work
         * item was not already scheduled for this vCPU. */
        if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
            async_run_on_cpu(cpu, cpu_throttle_thread, cpu);
        }
    }

    /* Re-arm after the guest's run share of the period has elapsed. */
    pct = (double)cpu_throttle_get_percentage()/100;
    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                                   CPU_THROTTLE_TIMESLICE_NS / (1-pct));
}
558
559void cpu_throttle_set(int new_throttle_pct)
560{
561 /* Ensure throttle percentage is within valid range */
562 new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
563 new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);
564
565 atomic_set(&throttle_percentage, new_throttle_pct);
566
567 timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
568 CPU_THROTTLE_TIMESLICE_NS);
569}
570
/* Disable vCPU throttling.  The timer callback observes the zero
 * percentage and simply stops re-arming itself. */
void cpu_throttle_stop(void)
{
    atomic_set(&throttle_percentage, 0);
}
575
576bool cpu_throttle_active(void)
577{
578 return (cpu_throttle_get_percentage() != 0);
579}
580
/* Return the current throttle percentage (0 when throttling is off).
 * Reads the value atomically, so it is safe from any thread. */
int cpu_throttle_get_percentage(void)
{
    return atomic_read(&throttle_percentage);
}
585
/* One-time initialization of timekeeping: the clock seqlock, the
 * "timer" migration vmstate, and the vCPU throttle timer. */
void cpu_ticks_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock, NULL);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                           cpu_throttle_timer_tick, NULL);
}
593
/* Parse the -icount option group: "shift" (an integer or "auto"),
 * "align" and "sleep".  Sets use_icount to 1 (fixed shift) or 2
 * (adaptive shift) and, in the adaptive case, arms the real-time and
 * virtual-time adjustment timers.  Errors go through @errp. */
void configure_icount(QemuOpts *opts, Error **errp)
{
    const char *option;
    char *rem_str = NULL;

    option = qemu_opt_get(opts, "shift");
    if (!option) {
        /* "align" is meaningless without a shift value. */
        if (qemu_opt_get(opts, "align") != NULL) {
            error_setg(errp, "Please specify shift option when using align");
        }
        return;
    }

    icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
    if (icount_sleep) {
        /* Only sleep mode needs the delayed-warp timer. */
        icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                         icount_warp_rt, NULL);
    }

    icount_align_option = qemu_opt_get_bool(opts, "align", false);

    if (icount_align_option && !icount_sleep) {
        error_setg(errp, "align=on and sleep=no are incompatible");
    }
    if (strcmp(option, "auto") != 0) {
        /* Fixed shift given on the command line. */
        errno = 0;
        icount_time_shift = strtol(option, &rem_str, 0);
        if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
            error_setg(errp, "icount: Invalid shift value");
        }
        use_icount = 1;
        return;
    } else if (icount_align_option) {
        error_setg(errp, "shift=auto and align=on are incompatible");
    } else if (!icount_sleep) {
        error_setg(errp, "shift=auto and sleep=no are incompatible");
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                                   icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
}
653
654/***********************************************************/
/* Report a fatal emulated-hardware error: print the printf-style
 * message and each vCPU's register dump to stderr, then abort().
 * Never returns. */
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}
671
/* Run cpu_synchronize_state() on every vCPU. */
void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}
680
/* Run cpu_synchronize_post_reset() on every vCPU, after system reset. */
void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}
689
/* Run cpu_synchronize_post_init() on every vCPU, after machine init. */
void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}
698
/* Run cpu_clean_state() on every vCPU. */
void cpu_clean_all_dirty(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_clean_state(cpu);
    }
}
707
/* Stop the VM: freeze the tick counter, pause all vCPUs, move to run
 * state @state and notify listeners; then drain and flush all block
 * devices.  Returns the result of bdrv_flush_all() (0 on success). */
static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        qapi_event_send_stop(&error_abort);
    }

    /* Drain and flush even if we were already stopped. */
    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}
725
Andreas Färbera1fcaa72012-05-02 23:42:26 +0200726static bool cpu_can_run(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +0000727{
Andreas Färber4fdeee72012-05-02 23:10:09 +0200728 if (cpu->stop) {
Andreas Färbera1fcaa72012-05-02 23:42:26 +0200729 return false;
Jan Kiszka0ab07c62011-02-07 12:19:14 +0100730 }
Tiejun Chen321bc0b2013-08-02 09:43:09 +0800731 if (cpu_is_stopped(cpu)) {
Andreas Färbera1fcaa72012-05-02 23:42:26 +0200732 return false;
Jan Kiszka0ab07c62011-02-07 12:19:14 +0100733 }
Andreas Färbera1fcaa72012-05-02 23:42:26 +0200734 return true;
Blue Swirl296af7c2010-03-29 19:23:50 +0000735}
736
/*
 * Handle a debug exception from @cpu: record it as the stopping CPU for
 * the gdbstub, ask the main loop to enter the debug run state, and mark
 * the vCPU stopped so it does not keep executing guest code.
 */
static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}
743
Jan Kiszka6d9cb732011-02-01 22:15:58 +0100744#ifdef CONFIG_LINUX
/*
 * Terminate the process with SIGBUS using the default disposition.
 * Restores SIG_DFL, raises SIGBUS and unblocks it so delivery happens
 * immediately; called when a host memory error cannot be handled.
 * Falls through to abort() only if re-arming the signal failed.
 */
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        /* SIGBUS is blocked in vCPU threads; unblock so it is delivered. */
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}
761
762static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
763 void *ctx)
764{
765 if (kvm_on_sigbus(siginfo->ssi_code,
766 (void *)(intptr_t)siginfo->ssi_addr)) {
767 sigbus_reraise();
768 }
769}
770
/*
 * Install the process-wide SIGBUS handler and opt in to early delivery
 * of hardware memory-error signals (PR_MCE_KILL_EARLY), so faults can
 * be forwarded to KVM before the kernel kills the process.
 */
static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    /* cast adapts the qemu_signalfd_siginfo-based handler to sa_sigaction */
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
782
/*
 * Drain pending SIG_IPI and SIGBUS signals on the calling vCPU thread
 * without blocking (zero timeout sigtimedwait).  SIGBUS is forwarded to
 * KVM's per-vCPU memory-error handling; an unhandled SIGBUS is fatal.
 * Loops until neither signal remains pending.
 */
static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };   /* poll, do not block */
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        /* EAGAIN (nothing pending) and EINTR are expected and benign */
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            /* SIG_IPI needs no action: consuming it is the point. */
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}
819
Jan Kiszka6d9cb732011-02-01 22:15:58 +0100820#else /* !CONFIG_LINUX */
821
/* Non-Linux stub: no SIGBUS/MCE machinery to set up. */
static void qemu_init_sigbus(void)
{
}
Jan Kiszka1ab3c6c2011-03-15 12:26:12 +0100825
/* Non-Linux stub: no host memory-error signals to consume. */
static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
Jan Kiszka6d9cb732011-02-01 22:15:58 +0100829#endif /* !CONFIG_LINUX */
830
Blue Swirl296af7c2010-03-29 19:23:50 +0000831#ifndef _WIN32
/*
 * Deliberately empty handler for SIG_IPI: the signal's only purpose is
 * to interrupt the vCPU thread (e.g. kick it out of the kernel), so no
 * action is needed on delivery.
 */
static void dummy_signal(int sig)
{
}
835
/*
 * Per-vCPU-thread signal setup for KVM: install the no-op SIG_IPI
 * handler, then hand KVM a signal mask that is the thread's current
 * mask minus SIG_IPI and SIGBUS, so those two are deliverable while
 * the vCPU executes inside KVM_RUN.  Exits on failure.
 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    /* start from the current thread mask, then open up the two kicks */
    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}
855
Jan Kiszka55f8d6a2011-02-01 22:15:52 +0100856#else /* _WIN32 */
/* KVM signal setup is POSIX-only; reaching this on Windows is a bug. */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}
Jan Kiszka55f8d6a2011-02-01 22:15:52 +0100861#endif /* _WIN32 */
Blue Swirl296af7c2010-03-29 19:23:50 +0000862
/* The Big QEMU Lock: serializes device emulation, main loop and vCPUs. */
static QemuMutex qemu_global_mutex;
/* Broadcast when the I/O thread has finished a request for the BQL. */
static QemuCond qemu_io_proceeded_cond;
/* Count of threads currently trying to take the BQL away from TCG. */
static unsigned iothread_requesting_mutex;

static QemuThread io_thread;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;
Blue Swirl296af7c2010-03-29 19:23:50 +0000874
/*
 * One-time initialisation of the vCPU synchronisation primitives.
 * Must be called from the main (I/O) thread before any vCPU is created;
 * it also records the calling thread as the I/O thread.
 */
void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}
886
/*
 * Run @func(@data) on @cpu's thread and wait for completion.
 *
 * If called from @cpu's own thread, @func runs immediately.  Otherwise
 * a stack-allocated work item is appended to cpu's work queue under
 * cpu->work_mutex, the vCPU is kicked, and the caller sleeps on
 * qemu_work_cond (releasing the BQL while waiting) until the item is
 * marked done by flush_queued_work().  Must be called with the BQL held.
 */
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;   /* lives on our stack; the consumer must not free it */

    qemu_mutex_lock(&cpu->work_mutex);
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
    /* done is written with atomic_mb_set by the executing thread */
    while (!atomic_mb_read(&wi.done)) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        /* restore current_cpu in case it changed while we slept */
        current_cpu = self_cpu;
    }
}
919
/*
 * Schedule @func(@data) to run on @cpu's thread without waiting.
 *
 * Runs @func immediately when called from @cpu's own thread.  Otherwise
 * a heap-allocated work item (freed by flush_queued_work() after the
 * callback runs, since wi->free is true) is queued under cpu->work_mutex
 * and the vCPU is kicked.
 */
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;   /* consumer frees it; caller never sees it again */

    qemu_mutex_lock(&cpu->work_mutex);
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
}
947
/*
 * Execute every queued work item for @cpu, in FIFO order, on the
 * calling (vCPU) thread.  cpu->work_mutex is dropped around each
 * callback so the callback itself may queue further work.  Async items
 * (wi->free) are freed here; sync items are marked done and waiters on
 * qemu_work_cond are woken at the end.
 */
static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    /* unlocked peek: cheap fast path when there is nothing to do */
    if (cpu->queued_work_first == NULL) {
        return;
    }

    qemu_mutex_lock(&cpu->work_mutex);
    while (cpu->queued_work_first != NULL) {
        wi = cpu->queued_work_first;
        cpu->queued_work_first = wi->next;
        if (!cpu->queued_work_first) {
            cpu->queued_work_last = NULL;
        }
        /* run the callback outside work_mutex to avoid self-deadlock */
        qemu_mutex_unlock(&cpu->work_mutex);
        wi->func(wi->data);
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            /* paired with atomic_mb_read in run_on_cpu's wait loop */
            atomic_mb_set(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}
975
/*
 * Per-vCPU housekeeping shared by all accelerator wait loops:
 * acknowledge a pending stop request (waking pause_all_vcpus waiters),
 * run queued work items, and re-arm the kick flag.
 */
static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}
986
/*
 * TCG thread idle/wait path.  Sleeps on @cpu's halt condition while all
 * vCPUs are idle (warping the virtual clock so timers still fire), then
 * yields to any thread waiting for the BQL, and finally performs the
 * common housekeeping for every vCPU.
 */
static void qemu_tcg_wait_io_event(CPUState *cpu)
{
    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    /* let the I/O thread take the BQL before we spin again */
    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    /* NOTE: CPU_FOREACH reuses the @cpu parameter as its iterator */
    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}
1004
/*
 * KVM vCPU idle/wait path: sleep on the halt condition while the vCPU
 * is idle, then drain pending SIG_IPI/SIGBUS signals and perform the
 * common stop/work housekeeping.
 */
static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}
1014
/*
 * Thread function for a KVM vCPU (one thread per vCPU).
 *
 * Registers with RCU, takes the BQL, publishes thread identity, creates
 * the vCPU in KVM and sets up its signal mask, signals creation to
 * qemu_kvm_start_vcpu(), then loops forever: run guest code via
 * kvm_cpu_exec() while allowed, handle debug exits, and sleep in
 * qemu_kvm_wait_io_event() otherwise.  Never returns.
 */
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}
1052
/*
 * Thread function for the "dummy" accelerator (qtest): the vCPU never
 * executes guest code.  It publishes its identity, signals creation,
 * then loops: drop the BQL, block in sigwait() for SIG_IPI (the kick),
 * retake the BQL and run the common stop/work housekeeping.
 * Not supported on Windows.  Never returns.
 */
static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        /* clear current_cpu while we sleep without the BQL */
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}
1097
Jan Kiszkabdb7ca62011-09-26 09:40:39 +02001098static void tcg_exec_all(void);
1099
/*
 * Thread function for TCG: a single thread drives ALL TCG vCPUs.
 *
 * Publishes identity for every vCPU and signals creation, waits for the
 * machine to start (first_cpu unstopped) while servicing queued work,
 * then loops forever: execute guest code round-robin via tcg_exec_all(),
 * fire virtual-clock timers when using icount, and idle in
 * qemu_tcg_wait_io_event().  Never returns.
 */
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    /* one thread for all vCPUs: publish the same identity for each */
    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
        cpu->can_do_io = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    /* process any pending work */
    atomic_mb_set(&exit_request, 1);

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event(QTAILQ_FIRST(&cpus));
    }

    return NULL;
}
1144
/*
 * Interrupt @cpu's thread by sending it SIG_IPI (POSIX only; the
 * Windows path aborts because this function must not be reached there).
 * thread_kicked debounces repeat kicks until the target clears it in
 * qemu_wait_io_event_common().
 */
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    if (cpu->thread_kicked) {
        return;
    }
    cpu->thread_kicked = true;
    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    abort();
#endif
}
1163
/*
 * Force the TCG thread out of the guest-execution loop without touching
 * any halt condition: raise the global exit_request and cpu_exit() the
 * currently executing vCPU, if any.
 */
static void qemu_cpu_kick_no_halt(void)
{
    CPUState *cpu;
    /* Ensure whatever caused the exit has reached the CPU threads before
     * writing exit_request.
     */
    atomic_mb_set(&exit_request, 1);
    cpu = atomic_mb_read(&tcg_current_cpu);
    if (cpu) {
        cpu_exit(cpu);
    }
}
1176
Andreas Färberc08d7422012-05-03 04:34:15 +02001177void qemu_cpu_kick(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001178{
Andreas Färberf5c121b2012-05-03 01:22:49 +02001179 qemu_cond_broadcast(cpu->halt_cond);
Paolo Bonzinie0c38212015-08-26 00:19:19 +02001180 if (tcg_enabled()) {
1181 qemu_cpu_kick_no_halt();
1182 } else {
1183 qemu_cpu_kick_thread(cpu);
1184 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001185}
1186
/* Kick the calling vCPU thread itself; only valid on a vCPU thread. */
void qemu_cpu_kick_self(void)
{
    assert(current_cpu);
    qemu_cpu_kick_thread(current_cpu);
}
1192
/* Return true if the calling thread is @cpu's vCPU thread. */
bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}
1197
/* Return true if the calling thread is any vCPU thread. */
bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}
1202
/* Per-thread flag tracking whether this thread currently holds the BQL. */
static __thread bool iothread_locked = false;

/* Return true if the calling thread holds the iothread (big QEMU) lock. */
bool qemu_mutex_iothread_locked(void)
{
    return iothread_locked;
}
1209
/*
 * Acquire the BQL, kicking the TCG thread out of guest execution if it
 * currently holds the lock.  iothread_requesting_mutex is raised around
 * the acquisition so qemu_tcg_wait_io_event() yields to us, and
 * qemu_io_proceeded_cond is broadcast afterwards to let it resume.
 */
void qemu_mutex_lock_iothread(void)
{
    atomic_inc(&iothread_requesting_mutex);
    /* In the simple case there is no need to bump the VCPU thread out of
     * TCG code execution.
     */
    if (!tcg_enabled() || qemu_in_vcpu_thread() ||
        !first_cpu || !first_cpu->created) {
        qemu_mutex_lock(&qemu_global_mutex);
        atomic_dec(&iothread_requesting_mutex);
    } else {
        /* contended: kick TCG off the lock, then block for it */
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_no_halt();
            qemu_mutex_lock(&qemu_global_mutex);
        }
        atomic_dec(&iothread_requesting_mutex);
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
    iothread_locked = true;
}
1230
/* Release the BQL; clear the ownership flag before actually unlocking. */
void qemu_mutex_unlock_iothread(void)
{
    iothread_locked = false;
    qemu_mutex_unlock(&qemu_global_mutex);
}
1236
1237static int all_vcpus_paused(void)
1238{
Andreas Färberbdc44642013-06-24 23:50:24 +02001239 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001240
Andreas Färberbdc44642013-06-24 23:50:24 +02001241 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001242 if (!cpu->stopped) {
Blue Swirl296af7c2010-03-29 19:23:50 +00001243 return 0;
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001244 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001245 }
1246
1247 return 1;
1248}
1249
/*
 * Request every vCPU to stop and wait until all have acknowledged.
 *
 * Disables the virtual clock, sets each vCPU's stop flag and kicks it.
 * When called from a vCPU thread, this thread stops itself; for TCG
 * (single thread drives all vCPUs) the remaining vCPUs are marked
 * stopped directly and the wait is skipped, since nobody else could
 * acknowledge.  Otherwise waits on qemu_pause_cond, re-kicking vCPUs
 * after each wakeup in case a kick was consumed.
 */
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}
1278
/* Clear @cpu's stop state and kick its thread back into execution. */
void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}
1285
Blue Swirl296af7c2010-03-29 19:23:50 +00001286void resume_all_vcpus(void)
1287{
Andreas Färberbdc44642013-06-24 23:50:24 +02001288 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001289
Alex Bligh40daca52013-08-21 16:03:02 +01001290 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
Andreas Färberbdc44642013-06-24 23:50:24 +02001291 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001292 cpu_resume(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001293 }
1294}
1295
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001296/* For temporary buffers for forming a name */
1297#define VCPU_THREAD_NAME_SIZE 16
1298
/*
 * Set up @cpu for TCG execution.  All TCG vCPUs share one thread and
 * one halt condition: the first call creates them (and waits for the
 * thread to report creation); later calls just attach the existing
 * thread/condition to the new vCPU.
 */
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];
    /* function-local statics hold the shared thread and condition */
    static QemuCond *tcg_halt_cond;
    static QemuThread *tcg_cpu_thread;

    tcg_cpu_address_space_init(cpu, cpu->as);

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                 cpu->cpu_index);
        qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
                           cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        /* wait for the thread to signal that the vCPU exists */
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}
1329
/*
 * Create the dedicated thread for a KVM vCPU and block until the thread
 * reports (via cpu->created / qemu_cpu_cond) that the vCPU exists.
 */
static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
1345
/*
 * Create the thread for a dummy (qtest) vCPU and block until the thread
 * reports that the vCPU exists.
 */
static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
1361
/*
 * Accelerator-independent vCPU bring-up: record SMP topology, start the
 * vCPU in the stopped state (it is released later by resume/cpu_resume),
 * then dispatch to the KVM, TCG or dummy start routine.
 */
void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}
1375
/*
 * Stop the vCPU the calling thread is running (no-op from non-vCPU
 * threads).  Clears the pending stop *request*, marks the vCPU stopped,
 * exits its execution loop and wakes pause_all_vcpus() waiters.
 */
void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}
1385
/*
 * Stop the VM, transitioning to run state @state.
 *
 * From a vCPU thread the actual stop must happen in the main loop, so
 * only a vmstop request is filed and the calling vCPU stops itself;
 * returns 0 in that case.  From the main thread the stop is performed
 * synchronously via do_vm_stop() and its result is returned.
 */
int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}
1401
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03001402/* does a state transition even if the VM is already stopped,
1403 current state is forgotten forever */
Kevin Wolf56983462013-07-05 13:49:54 +02001404int vm_stop_force_state(RunState state)
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03001405{
1406 if (runstate_is_running()) {
Kevin Wolf56983462013-07-05 13:49:54 +02001407 return vm_stop(state);
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03001408 } else {
1409 runstate_set(state);
Kevin Wolf594a45c2013-07-18 14:52:19 +02001410 /* Make sure to return an error if the flush in a previous vm_stop()
1411 * failed. */
1412 return bdrv_flush_all();
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03001413 }
1414}
1415
Pavel Dovgalyuk8b427042015-09-17 19:24:05 +03001416static int64_t tcg_get_icount_limit(void)
1417{
1418 int64_t deadline;
1419
1420 if (replay_mode != REPLAY_MODE_PLAY) {
1421 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1422
1423 /* Maintain prior (possibly buggy) behaviour where if no deadline
1424 * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
1425 * INT32_MAX nanoseconds ahead, we still use INT32_MAX
1426 * nanoseconds.
1427 */
1428 if ((deadline < 0) || (deadline > INT32_MAX)) {
1429 deadline = INT32_MAX;
1430 }
1431
1432 return qemu_icount_round(deadline);
1433 } else {
1434 return replay_get_instructions();
1435 }
1436}
1437
/* Execute one TCG slice on @cpu and return the cpu_exec() exit reason.
 *
 * With icount enabled, the instruction budget from tcg_get_icount_limit()
 * is loaded into cpu->icount_decr.u16.low (capped at 0xffff) with any
 * remainder in cpu->icount_extra; after execution, unconsumed budget is
 * folded back out of timers_state.qemu_icount so the global counter only
 * reflects instructions actually executed.
 */
static int tcg_cpu_exec(CPUState *cpu)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        /* Remove any stale, unexecuted budget left on this CPU before
         * handing out a fresh one. */
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                    + cpu->icount_extra);
        cpu->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        count = tcg_get_icount_limit();
        timers_state.qemu_icount += count;
        /* The decrementer field is 16 bits wide; overflow goes to
         * icount_extra and is refilled as execution proceeds. */
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        cpu->icount_decr.u16.low = decr;
        cpu->icount_extra = count;
    }
    ret = cpu_exec(cpu);
#ifdef CONFIG_PROFILER
    tcg_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.
           (u32 clears both u16.low and the u16.high interrupt-request
           half in one store, unlike the u16.low reset above.)  */
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                        + cpu->icount_extra);
        cpu->icount_decr.u32 = 0;
        cpu->icount_extra = 0;
        replay_account_executed_instructions();
    }
    return ret;
}
1477
/* One round-robin pass of the single-threaded TCG loop: run each vCPU in
 * turn, starting from the global cursor next_cpu, until all CPUs have had a
 * slice, a CPU hits a debug exception or stop request, or exit_request is
 * raised by another thread. */
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    /* Resume the round-robin from where the last pass left off. */
    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;

        /* Stop the virtual clock while single-stepping (unless SSTEP_NOTIMER
         * asks for timers to keep running). */
        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }

    /* Pairs with smp_wmb in qemu_cpu_kick. */
    atomic_mb_set(&exit_request, 0);
}
1508
/* Print the list of CPU models supported by the current target to @f.
 * Targets that do not define a cpu_list() hook produce no output. */
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03001516
/* QMP 'query-cpus': build a CpuInfoList describing every vCPU.
 *
 * Per-target #if blocks select the architecture-specific env pointer and
 * the matching program-counter field(s) of CpuInfo.  The caller owns the
 * returned list (qapi-allocated). */
CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#elif defined(TARGET_TRICORE)
        TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
        CPUTriCoreState *env = &tricore_cpu->env;
#endif

        /* Ensure register state is current before reading PC fields. */
        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#elif defined(TARGET_TRICORE)
        info->value->has_PC = true;
        info->value->PC = env->PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001580
1581void qmp_memsave(int64_t addr, int64_t size, const char *filename,
1582 bool has_cpu, int64_t cpu_index, Error **errp)
1583{
1584 FILE *f;
1585 uint32_t l;
Andreas Färber55e5c282012-12-17 06:18:02 +01001586 CPUState *cpu;
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001587 uint8_t buf[1024];
Borislav Petkov0dc9daf2015-02-08 13:14:38 +01001588 int64_t orig_addr = addr, orig_size = size;
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001589
1590 if (!has_cpu) {
1591 cpu_index = 0;
1592 }
1593
Andreas Färber151d1322013-02-15 15:41:49 +01001594 cpu = qemu_get_cpu(cpu_index);
1595 if (cpu == NULL) {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +01001596 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
1597 "a CPU number");
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001598 return;
1599 }
1600
1601 f = fopen(filename, "wb");
1602 if (!f) {
Luiz Capitulino618da852013-06-07 14:35:06 -04001603 error_setg_file_open(errp, errno, filename);
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001604 return;
1605 }
1606
1607 while (size != 0) {
1608 l = sizeof(buf);
1609 if (l > size)
1610 l = size;
Aneesh Kumar K.V2f4d0f52013-10-01 21:49:30 +05301611 if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
Borislav Petkov0dc9daf2015-02-08 13:14:38 +01001612 error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
1613 " specified", orig_addr, orig_size);
Aneesh Kumar K.V2f4d0f52013-10-01 21:49:30 +05301614 goto exit;
1615 }
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001616 if (fwrite(buf, 1, l, f) != l) {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +01001617 error_setg(errp, QERR_IO_ERROR);
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001618 goto exit;
1619 }
1620 addr += l;
1621 size -= l;
1622 }
1623
1624exit:
1625 fclose(f);
1626}
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02001627
1628void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
1629 Error **errp)
1630{
1631 FILE *f;
1632 uint32_t l;
1633 uint8_t buf[1024];
1634
1635 f = fopen(filename, "wb");
1636 if (!f) {
Luiz Capitulino618da852013-06-07 14:35:06 -04001637 error_setg_file_open(errp, errno, filename);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02001638 return;
1639 }
1640
1641 while (size != 0) {
1642 l = sizeof(buf);
1643 if (l > size)
1644 l = size;
Stefan Weileb6282f2014-04-07 20:28:23 +02001645 cpu_physical_memory_read(addr, buf, l);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02001646 if (fwrite(buf, 1, l, f) != l) {
Markus Armbrusterc6bd8c72015-03-17 11:54:50 +01001647 error_setg(errp, QERR_IO_ERROR);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02001648 goto exit;
1649 }
1650 addr += l;
1651 size -= l;
1652 }
1653
1654exit:
1655 fclose(f);
1656}
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02001657
/* QMP 'inject-nmi': deliver an NMI to the guest.
 *
 * On x86 the NMI goes to every vCPU: through the APIC when one is present,
 * or as a raw CPU_INTERRUPT_NMI otherwise.  On other targets delivery is
 * delegated to the NMI handler registered for the monitor's current CPU. */
void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);

        if (!cpu->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(cpu->apic_state);
        }
    }
#else
    nmi_monitor_handle(monitor_get_cpu_index(), errp);
#endif
}
Sebastian Tanase27498be2014-07-25 11:56:33 +02001676
1677void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
1678{
1679 if (!use_icount) {
1680 return;
1681 }
1682
1683 cpu_fprintf(f, "Host - Guest clock %"PRIi64" ms\n",
1684 (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
1685 if (icount_align_option) {
1686 cpu_fprintf(f, "Max guest delay %"PRIi64" ms\n", -max_delay/SCALE_MS);
1687 cpu_fprintf(f, "Max guest advance %"PRIi64" ms\n", max_advance/SCALE_MS);
1688 } else {
1689 cpu_fprintf(f, "Max guest delay NA\n");
1690 cpu_fprintf(f, "Max guest advance NA\n");
1691 }
1692}