blob: 19245e99b9e6c450036dd52c65a21e04382c9f5a [file] [log] [blame]
Blue Swirl296af7c2010-03-29 19:23:50 +00001/*
2 * QEMU System Emulator
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25/* Needed early for CONFIG_BSD etc. */
26#include "config-host.h"
27
Paolo Bonzini83c90892012-12-17 18:19:49 +010028#include "monitor/monitor.h"
Wenchao Xiaa4e15de2014-06-18 08:43:36 +020029#include "qapi/qmp/qerror.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010030#include "sysemu/sysemu.h"
Paolo Bonzini022c62c2012-12-17 18:19:49 +010031#include "exec/gdbstub.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010032#include "sysemu/dma.h"
33#include "sysemu/kvm.h"
Luiz Capitulinode0b36b2011-09-21 16:38:35 -030034#include "qmp-commands.h"
Blue Swirl296af7c2010-03-29 19:23:50 +000035
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010036#include "qemu/thread.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010037#include "sysemu/cpus.h"
38#include "sysemu/qtest.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010039#include "qemu/main-loop.h"
40#include "qemu/bitmap.h"
Liu Ping Fancb365642013-09-25 14:20:58 +080041#include "qemu/seqlock.h"
Wenchao Xiaa4e15de2014-06-18 08:43:36 +020042#include "qapi-event.h"
Jan Kiszka0ff0fc12011-06-23 10:15:55 +020043
44#ifndef _WIN32
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010045#include "qemu/compatfd.h"
Jan Kiszka0ff0fc12011-06-23 10:15:55 +020046#endif
Blue Swirl296af7c2010-03-29 19:23:50 +000047
#ifdef CONFIG_LINUX

#include <sys/prctl.h>

/* Fallback definitions for older kernel headers that predate the
 * PR_MCE_KILL machine-check-exception prctl interface.  Values match
 * include/uapi/linux/prctl.h.
 */
#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */
65
Andreas Färber182735e2013-05-29 22:29:20 +020066static CPUState *next_cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +000067
Tiejun Chen321bc0b2013-08-02 09:43:09 +080068bool cpu_is_stopped(CPUState *cpu)
69{
70 return cpu->stopped || !runstate_is_running();
71}
72
Andreas Färbera98ae1d2013-05-26 23:21:08 +020073static bool cpu_thread_is_idle(CPUState *cpu)
Peter Maydellac873f12012-07-19 16:52:27 +010074{
Andreas Färberc64ca812012-05-03 02:11:45 +020075 if (cpu->stop || cpu->queued_work_first) {
Peter Maydellac873f12012-07-19 16:52:27 +010076 return false;
77 }
Tiejun Chen321bc0b2013-08-02 09:43:09 +080078 if (cpu_is_stopped(cpu)) {
Peter Maydellac873f12012-07-19 16:52:27 +010079 return true;
80 }
Andreas Färber8c2e1b02013-08-25 18:53:55 +020081 if (!cpu->halted || cpu_has_work(cpu) ||
Alexander Graf215e79c2013-04-24 22:24:12 +020082 kvm_halt_in_kernel()) {
Peter Maydellac873f12012-07-19 16:52:27 +010083 return false;
84 }
85 return true;
86}
87
88static bool all_cpu_threads_idle(void)
89{
Andreas Färber182735e2013-05-29 22:29:20 +020090 CPUState *cpu;
Peter Maydellac873f12012-07-19 16:52:27 +010091
Andreas Färberbdc44642013-06-24 23:50:24 +020092 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +020093 if (!cpu_thread_is_idle(cpu)) {
Peter Maydellac873f12012-07-19 16:52:27 +010094 return false;
95 }
96 }
97 return true;
98}
99
/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

/* QEMU_CLOCK_REALTIME value at which a pending icount warp began,
 * or -1 when no warp is in progress. */
static int64_t vm_clock_warp_start = -1;
/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10

/* Periodic timers that re-tune icount_time_shift (adaptive mode) and
 * the one-shot timer that ends a clock warp. */
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;

typedef struct TimersState {
    /* Protected by BQL. */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read out of BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    /* Placeholder field kept so the vmstate layout (see vmstate_timers)
     * stays migration-compatible. */
    int64_t dummy;

    /* Compensate for varying guest execution speed. */
    int64_t qemu_icount_bias;
    /* Only written by TCG thread */
    int64_t qemu_icount;
} TimersState;

/* The single global instance; registered for migration in
 * configure_icount(). */
static TimersState timers_state;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200135
136/* Return the virtual CPU time, based on the instruction counter. */
Paolo Bonzini17a15f12013-10-03 15:17:25 +0200137static int64_t cpu_get_icount_locked(void)
Paolo Bonzini946fb272011-09-12 13:57:37 +0200138{
139 int64_t icount;
Andreas Färber4917cf42013-05-27 05:17:50 +0200140 CPUState *cpu = current_cpu;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200141
KONRAD Fredericc96778b2014-08-01 01:37:09 +0200142 icount = timers_state.qemu_icount;
Andreas Färber4917cf42013-05-27 05:17:50 +0200143 if (cpu) {
Andreas Färber99df7dc2013-08-26 05:15:23 +0200144 if (!cpu_can_do_io(cpu)) {
Paolo Bonzini946fb272011-09-12 13:57:37 +0200145 fprintf(stderr, "Bad clock read\n");
146 }
Andreas Färber28ecfd72013-08-26 05:51:49 +0200147 icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
Paolo Bonzini946fb272011-09-12 13:57:37 +0200148 }
KONRAD Frederic3f031312014-08-01 01:37:15 +0200149 return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
Paolo Bonzini946fb272011-09-12 13:57:37 +0200150}
151
Paolo Bonzini17a15f12013-10-03 15:17:25 +0200152int64_t cpu_get_icount(void)
153{
154 int64_t icount;
155 unsigned start;
156
157 do {
158 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
159 icount = cpu_get_icount_locked();
160 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
161
162 return icount;
163}
164
/* Convert an instruction count to nanoseconds of virtual time.
 * icount_time_shift is retuned on the fly in adaptive mode
 * (icount_adjust), so the factor is not constant over a run. */
int64_t cpu_icount_to_ns(int64_t icount)
{
    return icount << icount_time_shift;
}
169
/* return the host CPU cycle counter and handle stop/restart */
/* Caller must hold the BQL */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    /* In icount mode the "cycle counter" is just virtual time. */
    if (use_icount) {
        return cpu_get_icount();
    }

    ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_real_ticks();
    }

    /* Enforce monotonicity: fold any backward jump into the offset so
     * later reads continue from the highest value ever returned. */
    if (timers_state.cpu_ticks_prev > ticks) {
        /* Note: non increasing ticks may happen if the host uses
           software suspend */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}
195
Liu Ping Fancb365642013-09-25 14:20:58 +0800196static int64_t cpu_get_clock_locked(void)
197{
Paolo Bonzini5f3e3102013-10-28 17:32:18 +0100198 int64_t ticks;
Liu Ping Fancb365642013-09-25 14:20:58 +0800199
Paolo Bonzini5f3e3102013-10-28 17:32:18 +0100200 ticks = timers_state.cpu_clock_offset;
201 if (timers_state.cpu_ticks_enabled) {
202 ticks += get_clock();
Liu Ping Fancb365642013-09-25 14:20:58 +0800203 }
204
Paolo Bonzini5f3e3102013-10-28 17:32:18 +0100205 return ticks;
Liu Ping Fancb365642013-09-25 14:20:58 +0800206}
207
Paolo Bonzini946fb272011-09-12 13:57:37 +0200208/* return the host CPU monotonic timer and handle stop/restart */
209int64_t cpu_get_clock(void)
210{
211 int64_t ti;
Liu Ping Fancb365642013-09-25 14:20:58 +0800212 unsigned start;
213
214 do {
215 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
216 ti = cpu_get_clock_locked();
217 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
218
219 return ti;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200220}
221
Sebastian Tanasec2aa5f82014-07-25 11:56:31 +0200222/* return the offset between the host clock and virtual CPU clock */
223int64_t cpu_get_clock_offset(void)
224{
225 int64_t ti;
226 unsigned start;
227
228 do {
229 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
230 ti = timers_state.cpu_clock_offset;
231 if (!timers_state.cpu_ticks_enabled) {
232 ti -= get_clock();
233 }
234 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
235
236 return -ti;
237}
238
/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, the real thing protected by the seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        /* Rebase the offsets so cpu_get_ticks()/cpu_get_clock() resume
         * from the values they last returned. */
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
253
/* disable cpu_get_ticks() : the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, the real thing protected by the seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        /* Freeze both counters at their current values. */
        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
269
/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

/* Feedback loop for adaptive icount (-icount auto): compare virtual
 * time against real time and nudge icount_time_shift so the guest
 * neither races ahead of nor lags behind the host clock. */
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex.  */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    /* Recompute the bias so the new shift does not cause a visible jump
     * in the virtual clock: bias + (icount << shift) == cur_icount. */
    timers_state.qemu_icount_bias = cur_icount
                              - (timers_state.qemu_icount << icount_time_shift);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
313
314static void icount_adjust_rt(void *opaque)
315{
Alex Bligh40daca52013-08-21 16:03:02 +0100316 timer_mod(icount_rt_timer,
317 qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
Paolo Bonzini946fb272011-09-12 13:57:37 +0200318 icount_adjust();
319}
320
321static void icount_adjust_vm(void *opaque)
322{
Alex Bligh40daca52013-08-21 16:03:02 +0100323 timer_mod(icount_vm_timer,
324 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
325 get_ticks_per_sec() / 10);
Paolo Bonzini946fb272011-09-12 13:57:37 +0200326 icount_adjust();
327}
328
329static int64_t qemu_icount_round(int64_t count)
330{
331 return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
332}
333
/* End a pending clock warp: credit the elapsed real time (bounded, in
 * adaptive mode, by how far virtual time lags) to the icount bias so
 * QEMU_CLOCK_VIRTUAL jumps forward to the next deadline. */
static void icount_warp_rt(void *opaque)
{
    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    if (atomic_read(&vm_clock_warp_start) == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        int64_t warp_delta;

        warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock_locked();
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = cur_time - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        timers_state.qemu_icount_bias += warp_delta;
    }
    vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);

    /* Warping may have made virtual-clock timers due; kick the loop. */
    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
368
/* Advance QEMU_CLOCK_VIRTUAL to @dest under qtest control, running all
 * timers that fall due along the way.  Only valid with qtest enabled. */
void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        /* Jump to the next timer deadline, or straight to dest if that
         * is sooner (qemu_soonest_timeout handles deadline < 0). */
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        timers_state.qemu_icount_bias += warp;
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
385
/* If all vCPUs are idle in icount mode, schedule a "warp": let real
 * time carry QEMU_CLOCK_VIRTUAL forward to the next timer deadline so
 * the guest does not hang waiting for virtual time that only advances
 * with executed instructions. */
void qemu_clock_warp(QEMUClockType type)
{
    int64_t clock;
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
     * This ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest QEMU_CLOCK_VIRTUAL timer.
     */
    icount_warp_rt(NULL);
    timer_del(icount_warp_timer);
    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    /* deadline < 0 means no virtual-clock timer is pending at all. */
    if (deadline < 0) {
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
         * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
         * event.  Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
         * after some "real" time, (related to the time left until the next
         * event) has passed.  The QEMU_CLOCK_REALTIME timer will do this.
         * This avoids that the warps are visible externally; for example,
         * you will not be sending network packets continuously instead of
         * every 100ms.
         */
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
            vm_clock_warp_start = clock;
        }
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);
        timer_mod_anticipate(icount_warp_timer, clock + deadline);
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
453
KONRAD Fredericd09eae32014-08-01 01:37:10 +0200454static bool icount_state_needed(void *opaque)
455{
456 return use_icount;
457}
458
/*
 * This is a subsection for icount migration.  It carries the icount
 * bias and instruction counter so virtual time resumes seamlessly on
 * the destination; sent only when icount_state_needed() is true.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    }
};
472
/* Migration description of TimersState.  The "dummy" field preserves
 * the historical wire format; cpu_clock_offset was added in v2. */
static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &icount_vmstate_timers,
            .needed = icount_state_needed,
        }, {
            /* empty */
        }
    }
};
492
/* Parse the -icount option group ("shift" and "align") and set up
 * icount mode: use_icount == 1 for a fixed shift, == 2 for adaptive
 * ("auto") mode with periodic speed adjustment.  Errors are reported
 * through @errp. */
void configure_icount(QemuOpts *opts, Error **errp)
{
    const char *option;
    char *rem_str = NULL;

    /* Always initialize the seqlock and register the timer state for
     * migration, even when icount is not used. */
    seqlock_init(&timers_state.vm_clock_seqlock, NULL);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    option = qemu_opt_get(opts, "shift");
    if (!option) {
        /* "align" is meaningless without a shift. */
        if (qemu_opt_get(opts, "align") != NULL) {
            error_setg(errp, "Please specify shift option when using align");
        }
        return;
    }
    icount_align_option = qemu_opt_get_bool(opts, "align", false);
    icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
                                     icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        /* Fixed shift: parse the numeric value with full error checks. */
        errno = 0;
        icount_time_shift = strtol(option, &rem_str, 0);
        if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
            error_setg(errp, "icount: Invalid shift value");
        }
        use_icount = 1;
        return;
    } else if (icount_align_option) {
        error_setg(errp, "shift=auto and align=on are incompatible");
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
                                   icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
}
543
544/***********************************************************/
Blue Swirl296af7c2010-03-29 19:23:50 +0000545void hw_error(const char *fmt, ...)
546{
547 va_list ap;
Andreas Färber55e5c282012-12-17 06:18:02 +0100548 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +0000549
550 va_start(ap, fmt);
551 fprintf(stderr, "qemu: hardware error: ");
552 vfprintf(stderr, fmt, ap);
553 fprintf(stderr, "\n");
Andreas Färberbdc44642013-06-24 23:50:24 +0200554 CPU_FOREACH(cpu) {
Andreas Färber55e5c282012-12-17 06:18:02 +0100555 fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
Andreas Färber878096e2013-05-27 01:33:50 +0200556 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
Blue Swirl296af7c2010-03-29 19:23:50 +0000557 }
558 va_end(ap);
559 abort();
560}
561
562void cpu_synchronize_all_states(void)
563{
Andreas Färber182735e2013-05-29 22:29:20 +0200564 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +0000565
Andreas Färberbdc44642013-06-24 23:50:24 +0200566 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +0200567 cpu_synchronize_state(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +0000568 }
569}
570
571void cpu_synchronize_all_post_reset(void)
572{
Andreas Färber182735e2013-05-29 22:29:20 +0200573 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +0000574
Andreas Färberbdc44642013-06-24 23:50:24 +0200575 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +0200576 cpu_synchronize_post_reset(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +0000577 }
578}
579
580void cpu_synchronize_all_post_init(void)
581{
Andreas Färber182735e2013-05-29 22:29:20 +0200582 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +0000583
Andreas Färberbdc44642013-06-24 23:50:24 +0200584 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +0200585 cpu_synchronize_post_init(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +0000586 }
587}
588
/* Stop the VM and transition the run state to @state: freeze the
 * clocks, pause all vCPUs, notify listeners, and flush block devices.
 * Returns the result of bdrv_flush_all() (0 on success). */
static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        /* Order matters: stop time first, then the vCPUs, then tell
         * everyone else (state change + QMP STOP event). */
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        qapi_event_send_stop(&error_abort);
    }

    /* Drain and flush even if we were already stopped, so callers can
     * rely on block devices being quiesced. */
    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}
606
Andreas Färbera1fcaa72012-05-02 23:42:26 +0200607static bool cpu_can_run(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +0000608{
Andreas Färber4fdeee72012-05-02 23:10:09 +0200609 if (cpu->stop) {
Andreas Färbera1fcaa72012-05-02 23:42:26 +0200610 return false;
Jan Kiszka0ab07c62011-02-07 12:19:14 +0100611 }
Tiejun Chen321bc0b2013-08-02 09:43:09 +0800612 if (cpu_is_stopped(cpu)) {
Andreas Färbera1fcaa72012-05-02 23:42:26 +0200613 return false;
Jan Kiszka0ab07c62011-02-07 12:19:14 +0100614 }
Andreas Färbera1fcaa72012-05-02 23:42:26 +0200615 return true;
Blue Swirl296af7c2010-03-29 19:23:50 +0000616}
617
/* A vCPU hit a debug event (breakpoint/watchpoint): hand it to the
 * gdbstub, request a debug stop of the whole VM, and mark this CPU
 * stopped so its thread parks until the debugger resumes it. */
static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}
624
/* SIG_IPI handler for the TCG thread: kick the currently executing CPU
 * out of its translation loop.  Runs in signal context, so it only
 * does async-signal-safe work. */
static void cpu_signal(int sig)
{
    if (current_cpu) {
        cpu_exit(current_cpu);
    }
    exit_request = 1;
}
Paolo Bonzini714bd042011-03-12 17:44:06 +0100632
Jan Kiszka6d9cb732011-02-01 22:15:58 +0100633#ifdef CONFIG_LINUX
634static void sigbus_reraise(void)
635{
636 sigset_t set;
637 struct sigaction action;
638
639 memset(&action, 0, sizeof(action));
640 action.sa_handler = SIG_DFL;
641 if (!sigaction(SIGBUS, &action, NULL)) {
642 raise(SIGBUS);
643 sigemptyset(&set);
644 sigaddset(&set, SIGBUS);
645 sigprocmask(SIG_UNBLOCK, &set, NULL);
646 }
647 perror("Failed to re-raise SIGBUS!\n");
648 abort();
649}
650
651static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
652 void *ctx)
653{
654 if (kvm_on_sigbus(siginfo->ssi_code,
655 (void *)(intptr_t)siginfo->ssi_addr)) {
656 sigbus_reraise();
657 }
658}
659
/* Install the SIGBUS handler and opt in to early machine-check kill
 * (PR_MCE_KILL_EARLY) so hardware memory errors are delivered to us
 * as soon as the kernel detects them. */
static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
671
/* Drain pending SIG_IPI/SIGBUS signals for a KVM vCPU thread without
 * blocking (zero timeout), forwarding SIGBUS to KVM.  Loops until no
 * relevant signal remains pending. */
static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        /* ts == {0,0}: poll rather than wait. */
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            /* SIG_IPI needs no action: dequeuing it is the point. */
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}
708
#else /* !CONFIG_LINUX */

/* No machine-check / SIGBUS handling outside Linux: empty stubs. */
static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */
719
Blue Swirl296af7c2010-03-29 19:23:50 +0000720#ifndef _WIN32
/* Intentionally empty handler: installed for SIG_IPI so the signal has
 * a non-default disposition; no action is needed when it fires. */
static void dummy_signal(int sig)
{
}
724
Andreas Färber13618e02013-05-26 23:41:00 +0200725static void qemu_kvm_init_cpu_signals(CPUState *cpu)
Paolo Bonzini714bd042011-03-12 17:44:06 +0100726{
727 int r;
728 sigset_t set;
729 struct sigaction sigact;
730
731 memset(&sigact, 0, sizeof(sigact));
732 sigact.sa_handler = dummy_signal;
733 sigaction(SIG_IPI, &sigact, NULL);
734
Paolo Bonzini714bd042011-03-12 17:44:06 +0100735 pthread_sigmask(SIG_BLOCK, NULL, &set);
736 sigdelset(&set, SIG_IPI);
737 sigdelset(&set, SIGBUS);
Andreas Färber491d6e82013-05-26 23:38:10 +0200738 r = kvm_set_signal_mask(cpu, &set);
Paolo Bonzini714bd042011-03-12 17:44:06 +0100739 if (r) {
740 fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
741 exit(1);
742 }
Paolo Bonzini714bd042011-03-12 17:44:06 +0100743}
744
745static void qemu_tcg_init_cpu_signals(void)
746{
Paolo Bonzini714bd042011-03-12 17:44:06 +0100747 sigset_t set;
748 struct sigaction sigact;
749
750 memset(&sigact, 0, sizeof(sigact));
751 sigact.sa_handler = cpu_signal;
752 sigaction(SIG_IPI, &sigact, NULL);
753
754 sigemptyset(&set);
755 sigaddset(&set, SIG_IPI);
756 pthread_sigmask(SIG_UNBLOCK, &set, NULL);
Paolo Bonzini714bd042011-03-12 17:44:06 +0100757}
758
Jan Kiszka55f8d6a2011-02-01 22:15:52 +0100759#else /* _WIN32 */
/* KVM is not available on Windows; reaching this path is a bug. */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}
764
/* Stub: no POSIX signal plumbing on Windows; the TCG thread is kicked
 * via SuspendThread/ResumeThread instead (see qemu_cpu_kick_thread). */
static void qemu_tcg_init_cpu_signals(void)
{
}
Jan Kiszka55f8d6a2011-02-01 22:15:52 +0100768#endif /* _WIN32 */
Blue Swirl296af7c2010-03-29 19:23:50 +0000769
/* Global lock serializing the iothread and vCPU threads; taken/released
 * through qemu_mutex_lock_iothread()/qemu_mutex_unlock_iothread(). */
static QemuMutex qemu_global_mutex;
/* Broadcast when the iothread has finished its lock-acquisition
 * handshake (see iothread_requesting_mutex). */
static QemuCond qemu_io_proceeded_cond;
/* Set while the iothread is trying to take qemu_global_mutex. */
static bool iothread_requesting_mutex;

static QemuThread io_thread;

/* All TCG vCPUs share a single thread and halt condition variable. */
static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation: signalled once a vCPU thread has set cpu->created */
static QemuCond qemu_cpu_cond;
/* system init: signalled when a vCPU acknowledges a stop request */
static QemuCond qemu_pause_cond;
/* signalled when queued work items have completed */
static QemuCond qemu_work_cond;
Blue Swirl296af7c2010-03-29 19:23:50 +0000784
Paolo Bonzinid3b12f52011-09-13 10:30:52 +0200785void qemu_init_cpu_loop(void)
Blue Swirl296af7c2010-03-29 19:23:50 +0000786{
Jan Kiszka6d9cb732011-02-01 22:15:58 +0100787 qemu_init_sigbus();
Anthony Liguoried945922011-02-08 18:18:18 +0100788 qemu_cond_init(&qemu_cpu_cond);
Anthony Liguoried945922011-02-08 18:18:18 +0100789 qemu_cond_init(&qemu_pause_cond);
790 qemu_cond_init(&qemu_work_cond);
Paolo Bonzini46daff12011-06-09 13:10:24 +0200791 qemu_cond_init(&qemu_io_proceeded_cond);
Blue Swirl296af7c2010-03-29 19:23:50 +0000792 qemu_mutex_init(&qemu_global_mutex);
Blue Swirl296af7c2010-03-29 19:23:50 +0000793
Jan Kiszkab7680cb2011-03-12 17:43:51 +0100794 qemu_thread_get_self(&io_thread);
Blue Swirl296af7c2010-03-29 19:23:50 +0000795}
796
Andreas Färberf100f0b2012-05-03 14:58:47 +0200797void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
Marcelo Tosattie82bcec2010-05-04 09:45:22 -0300798{
799 struct qemu_work_item wi;
800
Andreas Färber60e82572012-05-02 22:23:49 +0200801 if (qemu_cpu_is_self(cpu)) {
Marcelo Tosattie82bcec2010-05-04 09:45:22 -0300802 func(data);
803 return;
804 }
805
806 wi.func = func;
807 wi.data = data;
Chegu Vinod3c022702013-06-24 03:49:41 -0600808 wi.free = false;
Andreas Färberc64ca812012-05-03 02:11:45 +0200809 if (cpu->queued_work_first == NULL) {
810 cpu->queued_work_first = &wi;
Jan Kiszka0ab07c62011-02-07 12:19:14 +0100811 } else {
Andreas Färberc64ca812012-05-03 02:11:45 +0200812 cpu->queued_work_last->next = &wi;
Jan Kiszka0ab07c62011-02-07 12:19:14 +0100813 }
Andreas Färberc64ca812012-05-03 02:11:45 +0200814 cpu->queued_work_last = &wi;
Marcelo Tosattie82bcec2010-05-04 09:45:22 -0300815 wi.next = NULL;
816 wi.done = false;
817
Andreas Färberc08d7422012-05-03 04:34:15 +0200818 qemu_cpu_kick(cpu);
Marcelo Tosattie82bcec2010-05-04 09:45:22 -0300819 while (!wi.done) {
Andreas Färber4917cf42013-05-27 05:17:50 +0200820 CPUState *self_cpu = current_cpu;
Marcelo Tosattie82bcec2010-05-04 09:45:22 -0300821
822 qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
Andreas Färber4917cf42013-05-27 05:17:50 +0200823 current_cpu = self_cpu;
Marcelo Tosattie82bcec2010-05-04 09:45:22 -0300824 }
825}
826
Chegu Vinod3c022702013-06-24 03:49:41 -0600827void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
828{
829 struct qemu_work_item *wi;
830
831 if (qemu_cpu_is_self(cpu)) {
832 func(data);
833 return;
834 }
835
836 wi = g_malloc0(sizeof(struct qemu_work_item));
837 wi->func = func;
838 wi->data = data;
839 wi->free = true;
840 if (cpu->queued_work_first == NULL) {
841 cpu->queued_work_first = wi;
842 } else {
843 cpu->queued_work_last->next = wi;
844 }
845 cpu->queued_work_last = wi;
846 wi->next = NULL;
847 wi->done = false;
848
849 qemu_cpu_kick(cpu);
850}
851
Andreas Färber6d45b102012-05-03 02:13:22 +0200852static void flush_queued_work(CPUState *cpu)
Marcelo Tosattie82bcec2010-05-04 09:45:22 -0300853{
854 struct qemu_work_item *wi;
855
Andreas Färberc64ca812012-05-03 02:11:45 +0200856 if (cpu->queued_work_first == NULL) {
Marcelo Tosattie82bcec2010-05-04 09:45:22 -0300857 return;
Jan Kiszka0ab07c62011-02-07 12:19:14 +0100858 }
Marcelo Tosattie82bcec2010-05-04 09:45:22 -0300859
Andreas Färberc64ca812012-05-03 02:11:45 +0200860 while ((wi = cpu->queued_work_first)) {
861 cpu->queued_work_first = wi->next;
Marcelo Tosattie82bcec2010-05-04 09:45:22 -0300862 wi->func(wi->data);
863 wi->done = true;
Chegu Vinod3c022702013-06-24 03:49:41 -0600864 if (wi->free) {
865 g_free(wi);
866 }
Marcelo Tosattie82bcec2010-05-04 09:45:22 -0300867 }
Andreas Färberc64ca812012-05-03 02:11:45 +0200868 cpu->queued_work_last = NULL;
Marcelo Tosattie82bcec2010-05-04 09:45:22 -0300869 qemu_cond_broadcast(&qemu_work_cond);
870}
871
/* Common bookkeeping when a vCPU wakes from its halt wait: acknowledge
 * a pending stop request (waking pause_all_vcpus), run queued work
 * items, and re-arm the kick latch.  Called with qemu_global_mutex
 * held (callers just returned from qemu_cond_wait on it). */
static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    /* Allow the next qemu_cpu_kick to actually signal the thread. */
    cpu->thread_kicked = false;
}
882
/* Park the single TCG thread until there is work to do, then perform
 * per-CPU wake-up bookkeeping for every vCPU it multiplexes. */
static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
       /* Start accounting real time to the virtual clock if the CPUs
          are idle. */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    /* Yield to an iothread that is mid-way through acquiring the global
     * mutex (see qemu_mutex_lock_iothread). */
    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}
902
/* Block a KVM vCPU thread while it is idle, then drain its pending
 * signals and perform the common wake-up bookkeeping. */
static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}
912
/* Thread entry point for a KVM vCPU: take the global mutex, create the
 * in-kernel vCPU, set up signals, announce creation, then loop forever
 * alternating guest execution and event waiting.  Never returns. */
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation: qemu_kvm_start_vcpu waits on qemu_cpu_cond */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}
947
/* Thread entry point for the "dummy" accelerator (qtest): no guest code
 * ever runs; the thread just sleeps in sigwait for SIG_IPI kicks and
 * processes queued work/stop requests.  Not supported on Windows. */
static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        /* Drop current_cpu while sleeping without the iothread lock. */
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}
989
Jan Kiszkabdb7ca62011-09-26 09:40:39 +0200990static void tcg_exec_all(void);
991
/* Thread entry point for the single TCG thread that multiplexes all
 * vCPUs: announce creation of every CPU, wait for the initial kick-off,
 * then loop running tcg_exec_all() and sleeping between rounds. */
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    /* NOTE: 'cpu' is deliberately reused as the iteration variable from
     * here on; the original argument is no longer needed. */
    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (QTAILQ_FIRST(&cpus)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            /* A zero deadline means a virtual timer already expired. */
            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
1031
/* Force the vCPU's host thread out of guest execution: POSIX hosts send
 * SIG_IPI; Windows suspends the thread, runs cpu_signal, and resumes. */
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        /* NOTE(review): per Win32 docs GetThreadContext returns nonzero
         * on SUCCESS, so this condition spins while the call succeeds —
         * the opposite of the stated intent ("until we can get the
         * context", i.e. '== 0' would match the comment).  In practice
         * it likely exits on the first failure; confirm against MSDN
         * and test on Windows before changing. */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}
1070
Andreas Färberc08d7422012-05-03 04:34:15 +02001071void qemu_cpu_kick(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001072{
Andreas Färberf5c121b2012-05-03 01:22:49 +02001073 qemu_cond_broadcast(cpu->halt_cond);
Andreas Färber216fc9a2012-05-02 17:49:49 +02001074 if (!tcg_enabled() && !cpu->thread_kicked) {
Andreas Färber2ff09a42012-05-03 00:23:30 +02001075 qemu_cpu_kick_thread(cpu);
Andreas Färber216fc9a2012-05-02 17:49:49 +02001076 cpu->thread_kicked = true;
Jan Kiszkaaa2c3642011-02-01 22:15:42 +01001077 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001078}
1079
/* Kick the calling vCPU thread itself (POSIX only); must be running on
 * a vCPU thread (current_cpu set).  Not implemented on Windows. */
void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);

    if (!current_cpu->thread_kicked) {
        qemu_cpu_kick_thread(current_cpu);
        current_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}
1093
/* True when the calling thread is the given vCPU's host thread. */
bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}
1098
/* True when the calling thread is some vCPU thread (as opposed to the
 * iothread or another helper thread). */
static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}
1103
/* Acquire the global (iothread) mutex.  With TCG the vCPU thread may
 * hold the lock for long stretches, so publish our intent via
 * iothread_requesting_mutex and kick the TCG thread out of execution
 * if a trylock fails; qemu_tcg_wait_io_event waits for the broadcast
 * below before re-contending. */
void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            /* Lock is busy: force the TCG thread to drop it. */
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}
1118
/* Release the global (iothread) mutex. */
void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}
1123
1124static int all_vcpus_paused(void)
1125{
Andreas Färberbdc44642013-06-24 23:50:24 +02001126 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001127
Andreas Färberbdc44642013-06-24 23:50:24 +02001128 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001129 if (!cpu->stopped) {
Blue Swirl296af7c2010-03-29 19:23:50 +00001130 return 0;
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001131 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001132 }
1133
1134 return 1;
1135}
1136
/* Request every vCPU to stop and wait until all have acknowledged.
 * Stops the virtual clock first.  When called from a vCPU thread, stop
 * ourselves directly; for TCG (single shared thread) that is already
 * sufficient, so mark all CPUs stopped and return without waiting. */
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            /* TCG: the one vCPU thread is this one; no others to wait
             * for, so mark everything stopped ourselves. */
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        /* Re-kick in case a CPU went back to sleep before noticing. */
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}
1165
/* Clear a single vCPU's stop state and wake it up. */
void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}
1172
Blue Swirl296af7c2010-03-29 19:23:50 +00001173void resume_all_vcpus(void)
1174{
Andreas Färberbdc44642013-06-24 23:50:24 +02001175 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001176
Alex Bligh40daca52013-08-21 16:03:02 +01001177 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
Andreas Färberbdc44642013-06-24 23:50:24 +02001178 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001179 cpu_resume(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001180 }
1181}
1182
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001183/* For temporary buffers for forming a name */
1184#define VCPU_THREAD_NAME_SIZE 16
1185
/* Attach a vCPU to the TCG execution thread.  The first vCPU creates
 * the shared thread and halt condition; later vCPUs just reuse them.
 * Blocks until the thread has reported creation. */
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    tcg_cpu_address_space_init(cpu, cpu->as);

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                 cpu->cpu_index);
        qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
                           cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        /* Needed for the SuspendThread-based kick (qemu_cpu_kick_thread). */
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}
1214
Andreas Färber48a106b2013-05-27 02:20:39 +02001215static void qemu_kvm_start_vcpu(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001216{
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001217 char thread_name[VCPU_THREAD_NAME_SIZE];
1218
Andreas Färber814e6122012-05-02 17:00:37 +02001219 cpu->thread = g_malloc0(sizeof(QemuThread));
Andreas Färberf5c121b2012-05-03 01:22:49 +02001220 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1221 qemu_cond_init(cpu->halt_cond);
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001222 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
1223 cpu->cpu_index);
1224 qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
1225 cpu, QEMU_THREAD_JOINABLE);
Andreas Färber61a46212012-05-02 22:49:36 +02001226 while (!cpu->created) {
Paolo Bonzini18a85722011-03-12 17:44:03 +01001227 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001228 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001229}
1230
Andreas Färber10a90212013-05-27 02:24:35 +02001231static void qemu_dummy_start_vcpu(CPUState *cpu)
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001232{
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001233 char thread_name[VCPU_THREAD_NAME_SIZE];
1234
Andreas Färber814e6122012-05-02 17:00:37 +02001235 cpu->thread = g_malloc0(sizeof(QemuThread));
Andreas Färberf5c121b2012-05-03 01:22:49 +02001236 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1237 qemu_cond_init(cpu->halt_cond);
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001238 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
1239 cpu->cpu_index);
1240 qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001241 QEMU_THREAD_JOINABLE);
Andreas Färber61a46212012-05-02 22:49:36 +02001242 while (!cpu->created) {
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001243 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1244 }
1245}
1246
/* Finish vCPU setup and start its execution thread, dispatching on the
 * configured accelerator (KVM / TCG / dummy). */
void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    /* Start in the stopped state; resume_all_vcpus kicks things off. */
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}
1260
/* Stop the vCPU the calling thread is running (no-op on non-vCPU
 * threads): mark it stopped, exit the execution loop, and wake anyone
 * waiting in pause_all_vcpus. */
void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}
1270
/* Stop the VM and enter the given run state.  From a vCPU thread we can
 * only request the stop and park ourselves; the iothread performs the
 * actual transition later.  Returns 0, or do_vm_stop()'s result. */
int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}
1286
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03001287/* does a state transition even if the VM is already stopped,
1288 current state is forgotten forever */
Kevin Wolf56983462013-07-05 13:49:54 +02001289int vm_stop_force_state(RunState state)
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03001290{
1291 if (runstate_is_running()) {
Kevin Wolf56983462013-07-05 13:49:54 +02001292 return vm_stop(state);
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03001293 } else {
1294 runstate_set(state);
Kevin Wolf594a45c2013-07-18 14:52:19 +02001295 /* Make sure to return an error if the flush in a previous vm_stop()
1296 * failed. */
1297 return bdrv_flush_all();
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03001298 }
1299}
1300
/* Run one TCG execution slice for a vCPU.  In icount mode, first fold
 * any unexecuted instructions back into the global counter, then budget
 * a new slice bounded by the next QEMU_CLOCK_VIRTUAL deadline (split
 * into the 16-bit icount_decr field plus icount_extra); afterwards fold
 * the unused budget back again.  Returns cpu_exec()'s result. */
static int tcg_cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int64_t deadline;
        int decr;
        /* Undo the budget of the previous slice that was not executed. */
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                    + cpu->icount_extra);
        cpu->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        count = qemu_icount_round(deadline);
        timers_state.qemu_icount += count;
        /* Low 16 bits go into the decrementer; the rest is refilled
         * from icount_extra as it drains. */
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        cpu->icount_decr.u16.low = decr;
        cpu->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                    + cpu->icount_extra);
        /* Clears both u16.low and the high half (interrupt request). */
        cpu->icount_decr.u32 = 0;
        cpu->icount_extra = 0;
    }
    return ret;
}
1352
/* Round-robin over all vCPUs, giving each one execution slice, starting
 * from where the previous round left off (next_cpu persists across
 * calls).  Stops early on a debug exception, a stopped CPU, or a global
 * exit request. */
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;
        CPUArchState *env = cpu->env_ptr;

        /* Keep the virtual clock stopped while single-stepping with
         * timers disabled. */
        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}
1382
/* Print the list of supported CPU models for the current target, if the
 * target defines a cpu_list() implementation. */
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03001390
/* QMP handler for "query-cpus": build a singly-linked list with one
 * CpuInfo entry per vCPU.  Ownership of the list transfers to the
 * caller (freed via the QAPI-generated free function).  errp is unused
 * here; the command cannot fail. */
CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
        /* Grab the target-specific register file so the PC fields below
         * can be read; exactly one TARGET_* is defined per build. */
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#endif

        /* Make sure the register state is up to date (e.g. fetched from
         * the accelerator) before reading PC values from env. */
        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->thread_id = cpu->thread_id;
        /* Report the program counter under the target's conventional
         * field name (pc/nip/PC, plus npc on SPARC). */
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001447
1448void qmp_memsave(int64_t addr, int64_t size, const char *filename,
1449 bool has_cpu, int64_t cpu_index, Error **errp)
1450{
1451 FILE *f;
1452 uint32_t l;
Andreas Färber55e5c282012-12-17 06:18:02 +01001453 CPUState *cpu;
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001454 uint8_t buf[1024];
1455
1456 if (!has_cpu) {
1457 cpu_index = 0;
1458 }
1459
Andreas Färber151d1322013-02-15 15:41:49 +01001460 cpu = qemu_get_cpu(cpu_index);
1461 if (cpu == NULL) {
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001462 error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
1463 "a CPU number");
1464 return;
1465 }
1466
1467 f = fopen(filename, "wb");
1468 if (!f) {
Luiz Capitulino618da852013-06-07 14:35:06 -04001469 error_setg_file_open(errp, errno, filename);
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001470 return;
1471 }
1472
1473 while (size != 0) {
1474 l = sizeof(buf);
1475 if (l > size)
1476 l = size;
Aneesh Kumar K.V2f4d0f52013-10-01 21:49:30 +05301477 if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
1478 error_setg(errp, "Invalid addr 0x%016" PRIx64 "specified", addr);
1479 goto exit;
1480 }
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001481 if (fwrite(buf, 1, l, f) != l) {
1482 error_set(errp, QERR_IO_ERROR);
1483 goto exit;
1484 }
1485 addr += l;
1486 size -= l;
1487 }
1488
1489exit:
1490 fclose(f);
1491}
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02001492
1493void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
1494 Error **errp)
1495{
1496 FILE *f;
1497 uint32_t l;
1498 uint8_t buf[1024];
1499
1500 f = fopen(filename, "wb");
1501 if (!f) {
Luiz Capitulino618da852013-06-07 14:35:06 -04001502 error_setg_file_open(errp, errno, filename);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02001503 return;
1504 }
1505
1506 while (size != 0) {
1507 l = sizeof(buf);
1508 if (l > size)
1509 l = size;
Stefan Weileb6282f2014-04-07 20:28:23 +02001510 cpu_physical_memory_read(addr, buf, l);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02001511 if (fwrite(buf, 1, l, f) != l) {
1512 error_set(errp, QERR_IO_ERROR);
1513 goto exit;
1514 }
1515 addr += l;
1516 size -= l;
1517 }
1518
1519exit:
1520 fclose(f);
1521}
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02001522
1523void qmp_inject_nmi(Error **errp)
1524{
1525#if defined(TARGET_I386)
Andreas Färber182735e2013-05-29 22:29:20 +02001526 CPUState *cs;
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02001527
Andreas Färberbdc44642013-06-24 23:50:24 +02001528 CPU_FOREACH(cs) {
Andreas Färber182735e2013-05-29 22:29:20 +02001529 X86CPU *cpu = X86_CPU(cs);
Andreas Färber182735e2013-05-29 22:29:20 +02001530
Chen Fan02e51482013-12-23 17:04:02 +08001531 if (!cpu->apic_state) {
Andreas Färber182735e2013-05-29 22:29:20 +02001532 cpu_interrupt(cs, CPU_INTERRUPT_NMI);
Jan Kiszka02c09192011-10-18 00:00:06 +08001533 } else {
Chen Fan02e51482013-12-23 17:04:02 +08001534 apic_deliver_nmi(cpu->apic_state);
Jan Kiszka02c09192011-10-18 00:00:06 +08001535 }
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02001536 }
Eugene (jno) Dvurechenski7f7f9752012-12-05 15:50:07 +01001537#elif defined(TARGET_S390X)
1538 CPUState *cs;
1539 S390CPU *cpu;
1540
Andreas Färberbdc44642013-06-24 23:50:24 +02001541 CPU_FOREACH(cs) {
Eugene (jno) Dvurechenski7f7f9752012-12-05 15:50:07 +01001542 cpu = S390_CPU(cs);
1543 if (cpu->env.cpu_num == monitor_get_cpu_index()) {
1544 if (s390_cpu_restart(S390_CPU(cs)) == -1) {
1545 error_set(errp, QERR_UNSUPPORTED);
1546 return;
1547 }
1548 break;
1549 }
1550 }
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02001551#else
1552 error_set(errp, QERR_UNSUPPORTED);
1553#endif
1554}