blob: a6b6557146442b97b602832966a2079ea8ab6ed7 [file] [log] [blame]
Blue Swirl296af7c2010-03-29 19:23:50 +00001/*
2 * QEMU System Emulator
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25/* Needed early for CONFIG_BSD etc. */
26#include "config-host.h"
27
Paolo Bonzini83c90892012-12-17 18:19:49 +010028#include "monitor/monitor.h"
Wenchao Xiaa4e15de2014-06-18 08:43:36 +020029#include "qapi/qmp/qerror.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010030#include "sysemu/sysemu.h"
Paolo Bonzini022c62c2012-12-17 18:19:49 +010031#include "exec/gdbstub.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010032#include "sysemu/dma.h"
33#include "sysemu/kvm.h"
Luiz Capitulinode0b36b2011-09-21 16:38:35 -030034#include "qmp-commands.h"
Blue Swirl296af7c2010-03-29 19:23:50 +000035
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010036#include "qemu/thread.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010037#include "sysemu/cpus.h"
38#include "sysemu/qtest.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010039#include "qemu/main-loop.h"
40#include "qemu/bitmap.h"
Liu Ping Fancb365642013-09-25 14:20:58 +080041#include "qemu/seqlock.h"
Wenchao Xiaa4e15de2014-06-18 08:43:36 +020042#include "qapi-event.h"
Jan Kiszka0ff0fc12011-06-23 10:15:55 +020043
44#ifndef _WIN32
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010045#include "qemu/compatfd.h"
Jan Kiszka0ff0fc12011-06-23 10:15:55 +020046#endif
Blue Swirl296af7c2010-03-29 19:23:50 +000047
Jan Kiszka6d9cb732011-02-01 22:15:58 +010048#ifdef CONFIG_LINUX
49
50#include <sys/prctl.h>
51
Marcelo Tosattic0532a72010-10-11 15:31:21 -030052#ifndef PR_MCE_KILL
53#define PR_MCE_KILL 33
54#endif
55
Jan Kiszka6d9cb732011-02-01 22:15:58 +010056#ifndef PR_MCE_KILL_SET
57#define PR_MCE_KILL_SET 1
58#endif
59
60#ifndef PR_MCE_KILL_EARLY
61#define PR_MCE_KILL_EARLY 1
62#endif
63
64#endif /* CONFIG_LINUX */
65
Andreas Färber182735e2013-05-29 22:29:20 +020066static CPUState *next_cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +000067
Tiejun Chen321bc0b2013-08-02 09:43:09 +080068bool cpu_is_stopped(CPUState *cpu)
69{
70 return cpu->stopped || !runstate_is_running();
71}
72
Andreas Färbera98ae1d2013-05-26 23:21:08 +020073static bool cpu_thread_is_idle(CPUState *cpu)
Peter Maydellac873f12012-07-19 16:52:27 +010074{
Andreas Färberc64ca812012-05-03 02:11:45 +020075 if (cpu->stop || cpu->queued_work_first) {
Peter Maydellac873f12012-07-19 16:52:27 +010076 return false;
77 }
Tiejun Chen321bc0b2013-08-02 09:43:09 +080078 if (cpu_is_stopped(cpu)) {
Peter Maydellac873f12012-07-19 16:52:27 +010079 return true;
80 }
Andreas Färber8c2e1b02013-08-25 18:53:55 +020081 if (!cpu->halted || cpu_has_work(cpu) ||
Alexander Graf215e79c2013-04-24 22:24:12 +020082 kvm_halt_in_kernel()) {
Peter Maydellac873f12012-07-19 16:52:27 +010083 return false;
84 }
85 return true;
86}
87
88static bool all_cpu_threads_idle(void)
89{
Andreas Färber182735e2013-05-29 22:29:20 +020090 CPUState *cpu;
Peter Maydellac873f12012-07-19 16:52:27 +010091
Andreas Färberbdc44642013-06-24 23:50:24 +020092 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +020093 if (!cpu_thread_is_idle(cpu)) {
Peter Maydellac873f12012-07-19 16:52:27 +010094 return false;
95 }
96 }
97 return true;
98}
99
Blue Swirl296af7c2010-03-29 19:23:50 +0000100/***********************************************************/
Paolo Bonzini946fb272011-09-12 13:57:37 +0200101/* guest cycle counter */
102
Paolo Bonzinia3270e12013-10-07 17:18:15 +0200103/* Protected by TimersState seqlock */
104
Paolo Bonzinia3270e12013-10-07 17:18:15 +0200105static int64_t vm_clock_warp_start;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200106/* Conversion factor from emulated instructions to virtual clock ticks. */
107static int icount_time_shift;
108/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
109#define MAX_ICOUNT_SHIFT 10
Paolo Bonzinia3270e12013-10-07 17:18:15 +0200110
Paolo Bonzini946fb272011-09-12 13:57:37 +0200111static QEMUTimer *icount_rt_timer;
112static QEMUTimer *icount_vm_timer;
113static QEMUTimer *icount_warp_timer;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200114
115typedef struct TimersState {
Liu Ping Fancb365642013-09-25 14:20:58 +0800116 /* Protected by BQL. */
Paolo Bonzini946fb272011-09-12 13:57:37 +0200117 int64_t cpu_ticks_prev;
118 int64_t cpu_ticks_offset;
Liu Ping Fancb365642013-09-25 14:20:58 +0800119
120 /* cpu_clock_offset can be read out of BQL, so protect it with
121 * this lock.
122 */
123 QemuSeqLock vm_clock_seqlock;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200124 int64_t cpu_clock_offset;
125 int32_t cpu_ticks_enabled;
126 int64_t dummy;
KONRAD Fredericc96778b2014-08-01 01:37:09 +0200127
128 /* Compensate for varying guest execution speed. */
129 int64_t qemu_icount_bias;
130 /* Only written by TCG thread */
131 int64_t qemu_icount;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200132} TimersState;
133
Liu Ping Fand9cd4002013-07-21 08:43:00 +0000134static TimersState timers_state;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200135
/* Return the virtual CPU time, based on the instruction counter.
 * Caller must hold the seqlock write side or be inside a seqlock read
 * critical section (the fields read here are seqlock-protected).
 */
static int64_t cpu_get_icount_locked(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = timers_state.qemu_icount;
    if (cpu) {
        if (!cpu_can_do_io(cpu)) {
            fprintf(stderr, "Bad clock read\n");
        }
        /* Subtract the instructions budgeted but not yet executed by the
         * currently running CPU. */
        icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
    }
    /* Scale instructions to virtual ns and apply the accumulated bias. */
    return timers_state.qemu_icount_bias + (icount << icount_time_shift);
}
151
Paolo Bonzini17a15f12013-10-03 15:17:25 +0200152int64_t cpu_get_icount(void)
153{
154 int64_t icount;
155 unsigned start;
156
157 do {
158 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
159 icount = cpu_get_icount_locked();
160 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
161
162 return icount;
163}
164
Paolo Bonzini946fb272011-09-12 13:57:37 +0200165/* return the host CPU cycle counter and handle stop/restart */
Liu Ping Fancb365642013-09-25 14:20:58 +0800166/* Caller must hold the BQL */
Paolo Bonzini946fb272011-09-12 13:57:37 +0200167int64_t cpu_get_ticks(void)
168{
Paolo Bonzini5f3e3102013-10-28 17:32:18 +0100169 int64_t ticks;
170
Paolo Bonzini946fb272011-09-12 13:57:37 +0200171 if (use_icount) {
172 return cpu_get_icount();
173 }
Paolo Bonzini5f3e3102013-10-28 17:32:18 +0100174
175 ticks = timers_state.cpu_ticks_offset;
176 if (timers_state.cpu_ticks_enabled) {
177 ticks += cpu_get_real_ticks();
Paolo Bonzini946fb272011-09-12 13:57:37 +0200178 }
Paolo Bonzini5f3e3102013-10-28 17:32:18 +0100179
180 if (timers_state.cpu_ticks_prev > ticks) {
181 /* Note: non increasing ticks may happen if the host uses
182 software suspend */
183 timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
184 ticks = timers_state.cpu_ticks_prev;
185 }
186
187 timers_state.cpu_ticks_prev = ticks;
188 return ticks;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200189}
190
Liu Ping Fancb365642013-09-25 14:20:58 +0800191static int64_t cpu_get_clock_locked(void)
192{
Paolo Bonzini5f3e3102013-10-28 17:32:18 +0100193 int64_t ticks;
Liu Ping Fancb365642013-09-25 14:20:58 +0800194
Paolo Bonzini5f3e3102013-10-28 17:32:18 +0100195 ticks = timers_state.cpu_clock_offset;
196 if (timers_state.cpu_ticks_enabled) {
197 ticks += get_clock();
Liu Ping Fancb365642013-09-25 14:20:58 +0800198 }
199
Paolo Bonzini5f3e3102013-10-28 17:32:18 +0100200 return ticks;
Liu Ping Fancb365642013-09-25 14:20:58 +0800201}
202
Paolo Bonzini946fb272011-09-12 13:57:37 +0200203/* return the host CPU monotonic timer and handle stop/restart */
204int64_t cpu_get_clock(void)
205{
206 int64_t ti;
Liu Ping Fancb365642013-09-25 14:20:58 +0800207 unsigned start;
208
209 do {
210 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
211 ti = cpu_get_clock_locked();
212 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
213
214 return ti;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200215}
216
/* Enable cpu_get_ticks().
 * Caller must hold the BQL, which serves as the mutex for the write
 * side of vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, the real thing protected by the seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        /* Rebase both offsets so the clocks resume where they stopped. */
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
231
/* Disable cpu_get_ticks(): the clock is stopped.  You must not call
 * cpu_get_ticks() after that.
 * Caller must hold the BQL, which serves as the mutex for the write
 * side of vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, the real thing protected by the seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        /* Freeze both clocks at their current values. */
        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
247
248/* Correlation between real and virtual time is always going to be
249 fairly approximate, so ignore small variation.
250 When the guest is idle real and virtual time will be aligned in
251 the IO wait loop. */
252#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)
253
/* Periodically re-tune icount_time_shift so emulated (virtual) time
 * tracks real time, then re-base the bias so virtual time is continuous
 * across the shift change.
 */
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex (callers run under the BQL). */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing. */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down. */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up. */
        icount_time_shift++;
    }
    last_delta = delta;
    /* Keep cur_icount invariant under the new shift value. */
    timers_state.qemu_icount_bias = cur_icount
                              - (timers_state.qemu_icount << icount_time_shift);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
291
292static void icount_adjust_rt(void *opaque)
293{
Alex Bligh40daca52013-08-21 16:03:02 +0100294 timer_mod(icount_rt_timer,
295 qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
Paolo Bonzini946fb272011-09-12 13:57:37 +0200296 icount_adjust();
297}
298
299static void icount_adjust_vm(void *opaque)
300{
Alex Bligh40daca52013-08-21 16:03:02 +0100301 timer_mod(icount_vm_timer,
302 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
303 get_ticks_per_sec() / 10);
Paolo Bonzini946fb272011-09-12 13:57:37 +0200304 icount_adjust();
305}
306
307static int64_t qemu_icount_round(int64_t count)
308{
309 return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
310}
311
/* Realtime timer callback: fold the real time that elapsed while all
 * CPUs slept ("warp") into the icount bias so virtual time catches up.
 */
static void icount_warp_rt(void *opaque)
{
    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    if (atomic_read(&vm_clock_warp_start) == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        int64_t warp_delta;

        warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock_locked();
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = cur_time - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        timers_state.qemu_icount_bias += warp_delta;
    }
    vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);

    /* The jump in virtual time may have made timers expire. */
    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
346
/* Advance QEMU_CLOCK_VIRTUAL to @dest, running every timer that expires
 * along the way.  Only valid under qtest, where the test drives time.
 */
void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        /* Jump to the next timer deadline, but never beyond @dest. */
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        timers_state.qemu_icount_bias += warp;
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
363
/* In icount mode, arrange for QEMU_CLOCK_VIRTUAL to keep advancing while
 * all VCPUs are idle, by arming a realtime timer that later applies the
 * accumulated warp (see icount_warp_rt).
 */
void qemu_clock_warp(QEMUClockType type)
{
    int64_t clock;
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
     * This ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest QEMU_CLOCK_VIRTUAL timer.
     */
    icount_warp_rt(NULL);
    timer_del(icount_warp_timer);
    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount. */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        /* No virtual timer pending at all: nothing to warp towards. */
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
         * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
         * event.  Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
         * after some "real" time (related to the time left until the next
         * event) has passed.  The QEMU_CLOCK_REALTIME timer will do this.
         * This avoids that the warps are visible externally; for example,
         * you will not be sending network packets continuously instead of
         * every 100ms.
         */
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
            vm_clock_warp_start = clock;
        }
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);
        timer_mod_anticipate(icount_warp_timer, clock + deadline);
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
431
KONRAD Fredericd09eae32014-08-01 01:37:10 +0200432static bool icount_state_needed(void *opaque)
433{
434 return use_icount;
435}
436
/*
 * This is a subsection for icount migration.  Kept optional (see
 * icount_state_needed) so migration to QEMU versions without it still
 * works when icount is unused.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    }
};
450
/* Migration state for TimersState.  Version 2 added cpu_clock_offset;
 * the icount fields live in an optional subsection above.
 */
static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &icount_vmstate_timers,
            .needed = icount_state_needed,
        }, {
            /* empty */
        }
    }
};
470
471void configure_icount(const char *option)
472{
Liu Ping Fancb365642013-09-25 14:20:58 +0800473 seqlock_init(&timers_state.vm_clock_seqlock, NULL);
Paolo Bonzini946fb272011-09-12 13:57:37 +0200474 vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
475 if (!option) {
476 return;
477 }
478
Alex Bligh40daca52013-08-21 16:03:02 +0100479 icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
480 icount_warp_rt, NULL);
Paolo Bonzini946fb272011-09-12 13:57:37 +0200481 if (strcmp(option, "auto") != 0) {
482 icount_time_shift = strtol(option, NULL, 0);
483 use_icount = 1;
484 return;
485 }
486
487 use_icount = 2;
488
489 /* 125MIPS seems a reasonable initial guess at the guest speed.
490 It will be corrected fairly quickly anyway. */
491 icount_time_shift = 3;
492
493 /* Have both realtime and virtual time triggers for speed adjustment.
494 The realtime trigger catches emulated time passing too slowly,
495 the virtual time trigger catches emulated time passing too fast.
496 Realtime triggers occur even when idle, so use them less frequently
497 than VM triggers. */
Alex Bligh40daca52013-08-21 16:03:02 +0100498 icount_rt_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
499 icount_adjust_rt, NULL);
500 timer_mod(icount_rt_timer,
501 qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
502 icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
503 icount_adjust_vm, NULL);
504 timer_mod(icount_vm_timer,
505 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
506 get_ticks_per_sec() / 10);
Paolo Bonzini946fb272011-09-12 13:57:37 +0200507}
508
509/***********************************************************/
/* Report a fatal hardware-emulation error: print the formatted message
 * and every CPU's register state (including FPU) to stderr, then abort.
 * Never returns.
 */
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}
526
527void cpu_synchronize_all_states(void)
528{
Andreas Färber182735e2013-05-29 22:29:20 +0200529 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +0000530
Andreas Färberbdc44642013-06-24 23:50:24 +0200531 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +0200532 cpu_synchronize_state(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +0000533 }
534}
535
536void cpu_synchronize_all_post_reset(void)
537{
Andreas Färber182735e2013-05-29 22:29:20 +0200538 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +0000539
Andreas Färberbdc44642013-06-24 23:50:24 +0200540 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +0200541 cpu_synchronize_post_reset(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +0000542 }
543}
544
545void cpu_synchronize_all_post_init(void)
546{
Andreas Färber182735e2013-05-29 22:29:20 +0200547 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +0000548
Andreas Färberbdc44642013-06-24 23:50:24 +0200549 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +0200550 cpu_synchronize_post_init(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +0000551 }
552}
553
/* Stop the VM: freeze the clocks, pause the VCPUs, switch the runstate
 * and notify listeners, then quiesce and flush all block devices.
 * Returns 0 on success or a negative value if flushing failed.
 */
static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        /* Emit the QMP STOP event for management tools. */
        qapi_event_send_stop(&error_abort);
    }

    /* Drain/flush unconditionally so callers can rely on the disks being
     * quiesced even if the VM was already stopped. */
    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}
571
Andreas Färbera1fcaa72012-05-02 23:42:26 +0200572static bool cpu_can_run(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +0000573{
Andreas Färber4fdeee72012-05-02 23:10:09 +0200574 if (cpu->stop) {
Andreas Färbera1fcaa72012-05-02 23:42:26 +0200575 return false;
Jan Kiszka0ab07c62011-02-07 12:19:14 +0100576 }
Tiejun Chen321bc0b2013-08-02 09:43:09 +0800577 if (cpu_is_stopped(cpu)) {
Andreas Färbera1fcaa72012-05-02 23:42:26 +0200578 return false;
Jan Kiszka0ab07c62011-02-07 12:19:14 +0100579 }
Andreas Färbera1fcaa72012-05-02 23:42:26 +0200580 return true;
Blue Swirl296af7c2010-03-29 19:23:50 +0000581}
582
/* A VCPU hit a debug event: route control to the gdbstub, request a
 * system-wide debug stop, and park this CPU.
 */
static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}
589
/* SIG_IPI handler for the TCG thread: force the currently executing CPU
 * out of the translated-code loop.  Runs in signal context, so it only
 * does async-signal-safe work.
 */
static void cpu_signal(int sig)
{
    if (current_cpu) {
        cpu_exit(current_cpu);
    }
    exit_request = 1;
}
Paolo Bonzini714bd042011-03-12 17:44:06 +0100597
Jan Kiszka6d9cb732011-02-01 22:15:58 +0100598#ifdef CONFIG_LINUX
/* Restore the default SIGBUS disposition and re-raise it so the process
 * terminates with the proper signal status (used when KVM cannot
 * recover from a hardware memory error).  Never returns.
 */
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        /* SIGBUS may be blocked in this thread: unblock it so the
         * just-raised signal is actually delivered. */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}
615
/* SIGBUS handler (signalfd siginfo layout): forward machine-check
 * errors to KVM; if KVM cannot handle them, die via sigbus_reraise().
 */
static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}
624
/* Install the SIGBUS handler and request early machine-check delivery
 * from the kernel so memory errors reach QEMU synchronously.
 */
static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
636
/* Drain pending SIG_IPI/SIGBUS for a KVM VCPU thread without blocking.
 * SIGBUS events are forwarded to KVM; unrecoverable ones re-raise and
 * kill the process.  Loops until neither signal remains pending.
 */
static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };  /* zero timeout: poll, don't block */
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            /* SIG_IPI needs no action beyond being consumed; -1 means
             * nothing was pending this iteration. */
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}
673
Jan Kiszka6d9cb732011-02-01 22:15:58 +0100674#else /* !CONFIG_LINUX */
675
/* Without CONFIG_LINUX there is no SIGBUS/MCE machinery to set up. */
static void qemu_init_sigbus(void)
{
}
Jan Kiszka1ab3c6c2011-03-15 12:26:12 +0100679
Andreas Färber290adf32013-01-17 09:30:27 +0100680static void qemu_kvm_eat_signals(CPUState *cpu)
Jan Kiszka1ab3c6c2011-03-15 12:26:12 +0100681{
682}
Jan Kiszka6d9cb732011-02-01 22:15:58 +0100683#endif /* !CONFIG_LINUX */
684
Blue Swirl296af7c2010-03-29 19:23:50 +0000685#ifndef _WIN32
/* No-op handler: SIG_IPI only needs to interrupt blocking syscalls. */
static void dummy_signal(int sig)
{
}
689
/* Per-VCPU signal setup for KVM: install a dummy SIG_IPI handler, then
 * tell KVM to unblock SIG_IPI/SIGBUS only while the VCPU is inside
 * KVM_RUN, so kicks interrupt the kernel rather than userspace.
 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    /* Start from the thread's current mask and carve out the two
     * signals KVM should see. */
    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}
709
/* TCG signal setup: SIG_IPI must be unblocked in this thread so
 * cpu_signal() can kick the translation loop.
 */
static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}
723
Jan Kiszka55f8d6a2011-02-01 22:15:52 +0100724#else /* _WIN32 */
/* KVM is unavailable on Win32: reaching this function is a bug. */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}
729
/* No POSIX signal setup needed for TCG on Win32. */
static void qemu_tcg_init_cpu_signals(void)
{
}
Jan Kiszka55f8d6a2011-02-01 22:15:52 +0100733#endif /* _WIN32 */
Blue Swirl296af7c2010-03-29 19:23:50 +0000734
/* The Big QEMU Lock: serializes the I/O thread and all vCPU threads. */
static QemuMutex qemu_global_mutex;
/* Signalled when the I/O thread has released/acquired the BQL again. */
static QemuCond qemu_io_proceeded_cond;
/* True while the I/O thread is waiting to acquire qemu_global_mutex. */
static bool iothread_requesting_mutex;

/* Identity of the main (I/O) thread, recorded in qemu_init_cpu_loop(). */
static QemuThread io_thread;

/* All TCG vCPUs share one thread and one halt condition variable. */
static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
/* Signalled when a queued qemu_work_item has completed. */
static QemuCond qemu_work_cond;
Blue Swirl296af7c2010-03-29 19:23:50 +0000749
Paolo Bonzinid3b12f52011-09-13 10:30:52 +0200750void qemu_init_cpu_loop(void)
Blue Swirl296af7c2010-03-29 19:23:50 +0000751{
Jan Kiszka6d9cb732011-02-01 22:15:58 +0100752 qemu_init_sigbus();
Anthony Liguoried945922011-02-08 18:18:18 +0100753 qemu_cond_init(&qemu_cpu_cond);
Anthony Liguoried945922011-02-08 18:18:18 +0100754 qemu_cond_init(&qemu_pause_cond);
755 qemu_cond_init(&qemu_work_cond);
Paolo Bonzini46daff12011-06-09 13:10:24 +0200756 qemu_cond_init(&qemu_io_proceeded_cond);
Blue Swirl296af7c2010-03-29 19:23:50 +0000757 qemu_mutex_init(&qemu_global_mutex);
Blue Swirl296af7c2010-03-29 19:23:50 +0000758
Jan Kiszkab7680cb2011-03-12 17:43:51 +0100759 qemu_thread_get_self(&io_thread);
Blue Swirl296af7c2010-03-29 19:23:50 +0000760}
761
/* Run func(data) on @cpu's thread and wait for it to complete.
 *
 * If called from @cpu's own thread the function runs immediately.
 * Otherwise a stack-allocated work item is appended to the vCPU's work
 * queue, the vCPU is kicked, and the caller sleeps on qemu_work_cond
 * until the item is marked done.  The queue and the wait are protected
 * by qemu_global_mutex, which the caller must hold (qemu_cond_wait
 * below sleeps on it).
 */
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;    /* lives on our stack; must not be g_free()d */
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        /* qemu_cond_wait drops the BQL; another vCPU thread may have
         * overwritten current_cpu meanwhile, so save and restore it. */
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}
791
/* Schedule func(data) to run on @cpu's thread without waiting.
 *
 * Runs synchronously when already on @cpu's thread.  Otherwise a
 * heap-allocated work item (freed by flush_queued_work because
 * wi->free is true) is appended to the vCPU's queue and the vCPU is
 * kicked; the caller returns immediately.
 */
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;    /* consumer g_free()s it after running */
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}
816
/* Drain and execute @cpu's queued work items, then wake any waiters.
 *
 * Each item is unlinked, its callback invoked, and marked done; items
 * queued by async_run_on_cpu (wi->free) are freed here.  Finally all
 * run_on_cpu callers sleeping on qemu_work_cond are broadcast.
 * NOTE(review): callers appear to hold qemu_global_mutex (the waiter in
 * run_on_cpu sleeps on qemu_work_cond with it) — confirm before reuse.
 */
static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;    /* must precede g_free: sync items are on a stack */
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}
836
/* Per-iteration housekeeping shared by all vCPU wait loops:
 * acknowledge a pending stop request (signalling pause_all_vcpus),
 * run any queued work items, and re-arm the kick latch. */
static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}
847
/* Idle-wait for the single TCG thread (which serves every vCPU).
 *
 * While all vCPUs are idle, warp QEMU_CLOCK_VIRTUAL (so timer deadlines
 * account real idle time) and sleep on tcg_halt_cond.  Then yield to an
 * I/O thread that is waiting for the BQL, and finally run the common
 * per-vCPU housekeeping for every vCPU.
 */
static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle. */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    /* Let the I/O thread take the BQL if it asked for it. */
    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}
867
/* Idle-wait for a KVM vCPU thread: sleep on the vCPU's halt condition
 * while it is idle, then drain pending host signals and perform the
 * common stop/work housekeeping. */
static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}
877
/* Thread function for a KVM vCPU (one thread per vCPU).
 *
 * Takes the BQL, records its identity, creates the kernel vCPU and its
 * signal setup, announces creation on qemu_cpu_cond, then loops forever:
 * run the guest when allowed, hand EXCP_DEBUG to the gdbstub, and
 * otherwise wait for events.  Never returns.
 */
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}
912
/* Thread function for the "dummy" accelerator (qtest): the guest never
 * actually executes.  The thread announces creation, then loops waiting
 * for SIG_IPI kicks, dropping the BQL while blocked in sigwait and
 * performing the common stop/work housekeeping after each wakeup.
 * Not supported on Windows (no sigwait).  Never returns.
 */
static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        /* Clear current_cpu while the BQL is dropped so other code does
         * not mistake this thread for a running vCPU. */
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}
954
Jan Kiszkabdb7ca62011-09-26 09:40:39 +0200955static void tcg_exec_all(void);
956
/* Thread function for TCG: a single thread round-robins all vCPUs.
 *
 * After signal setup it marks every vCPU created (the CPU_FOREACH
 * deliberately reuses @cpu; the original argument is no longer needed),
 * waits for the first vCPU to be started after machine init, then loops
 * forever executing guests via tcg_exec_all() and idling in
 * qemu_tcg_wait_io_event().  With icount, an expired virtual-clock
 * deadline triggers timer processing via qemu_clock_notify().
 * Never returns.
 */
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (QTAILQ_FIRST(&cpus)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
996
Andreas Färber2ff09a42012-05-03 00:23:30 +0200997static void qemu_cpu_kick_thread(CPUState *cpu)
Paolo Bonzinicc015e92011-03-12 17:44:08 +0100998{
999#ifndef _WIN32
1000 int err;
1001
Andreas Färber814e6122012-05-02 17:00:37 +02001002 err = pthread_kill(cpu->thread->thread, SIG_IPI);
Paolo Bonzinicc015e92011-03-12 17:44:08 +01001003 if (err) {
1004 fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
1005 exit(1);
1006 }
1007#else /* _WIN32 */
Andreas Färber60e82572012-05-02 22:23:49 +02001008 if (!qemu_cpu_is_self(cpu)) {
Olivier Hainqueed9164a2013-04-09 18:06:53 +02001009 CONTEXT tcgContext;
1010
1011 if (SuspendThread(cpu->hThread) == (DWORD)-1) {
Stefan Weil7f1721d2013-04-13 22:45:50 +02001012 fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
Olivier Hainqueed9164a2013-04-09 18:06:53 +02001013 GetLastError());
1014 exit(1);
1015 }
1016
1017 /* On multi-core systems, we are not sure that the thread is actually
1018 * suspended until we can get the context.
1019 */
1020 tcgContext.ContextFlags = CONTEXT_CONTROL;
1021 while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
1022 continue;
1023 }
1024
Paolo Bonzinicc015e92011-03-12 17:44:08 +01001025 cpu_signal(0);
Olivier Hainqueed9164a2013-04-09 18:06:53 +02001026
1027 if (ResumeThread(cpu->hThread) == (DWORD)-1) {
Stefan Weil7f1721d2013-04-13 22:45:50 +02001028 fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
Olivier Hainqueed9164a2013-04-09 18:06:53 +02001029 GetLastError());
1030 exit(1);
1031 }
Paolo Bonzinicc015e92011-03-12 17:44:08 +01001032 }
1033#endif
1034}
1035
Andreas Färberc08d7422012-05-03 04:34:15 +02001036void qemu_cpu_kick(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001037{
Andreas Färberf5c121b2012-05-03 01:22:49 +02001038 qemu_cond_broadcast(cpu->halt_cond);
Andreas Färber216fc9a2012-05-02 17:49:49 +02001039 if (!tcg_enabled() && !cpu->thread_kicked) {
Andreas Färber2ff09a42012-05-03 00:23:30 +02001040 qemu_cpu_kick_thread(cpu);
Andreas Färber216fc9a2012-05-02 17:49:49 +02001041 cpu->thread_kicked = true;
Jan Kiszkaaa2c3642011-02-01 22:15:42 +01001042 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001043}
1044
/* Kick the calling vCPU thread itself (POSIX only; aborts on Windows,
 * where self-kick is unsupported).  Must be called from a vCPU thread
 * (current_cpu set). */
void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);

    if (!current_cpu->thread_kicked) {
        qemu_cpu_kick_thread(current_cpu);
        current_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}
1058
Andreas Färber60e82572012-05-02 22:23:49 +02001059bool qemu_cpu_is_self(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001060{
Andreas Färber814e6122012-05-02 17:00:37 +02001061 return qemu_thread_is_self(cpu->thread);
Blue Swirl296af7c2010-03-29 19:23:50 +00001062}
1063
Juan Quintelaaa723c22012-09-18 16:30:11 +02001064static bool qemu_in_vcpu_thread(void)
1065{
Andreas Färber4917cf42013-05-27 05:17:50 +02001066 return current_cpu && qemu_cpu_is_self(current_cpu);
Juan Quintelaaa723c22012-09-18 16:30:11 +02001067}
1068
/* Acquire the Big QEMU Lock from the I/O thread.
 *
 * Without TCG a plain lock suffices.  With TCG the (single) vCPU thread
 * may hold the BQL for long stretches, so if trylock fails we raise
 * iothread_requesting_mutex and kick the TCG thread, which yields in
 * qemu_tcg_wait_io_event() until the flag is cleared and
 * qemu_io_proceeded_cond is broadcast.
 */
void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            /* Contended: force the TCG thread out of the guest, then
             * block until it drops the BQL. */
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}
1083
/* Release the Big QEMU Lock; counterpart of qemu_mutex_lock_iothread(). */
void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}
1088
1089static int all_vcpus_paused(void)
1090{
Andreas Färberbdc44642013-06-24 23:50:24 +02001091 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001092
Andreas Färberbdc44642013-06-24 23:50:24 +02001093 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001094 if (!cpu->stopped) {
Blue Swirl296af7c2010-03-29 19:23:50 +00001095 return 0;
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001096 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001097 }
1098
1099 return 1;
1100}
1101
/* Stop every vCPU and wait until all have acknowledged.
 *
 * Disables the virtual clock, raises each vCPU's stop flag and kicks
 * it.  If called from a vCPU thread, this thread stops itself first;
 * for TCG (single shared thread) the remaining vCPUs can then simply
 * be marked stopped and we return without waiting.  Otherwise block on
 * qemu_pause_cond, re-kicking laggards, until all_vcpus_paused().
 */
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            /* TCG: all vCPUs run on this very thread, so none of them
             * can be executing now — mark them stopped directly. */
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}
1130
/* Restart a single paused vCPU: clear both the pending stop request
 * and the stopped acknowledgement, then wake its thread. */
void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}
1137
Blue Swirl296af7c2010-03-29 19:23:50 +00001138void resume_all_vcpus(void)
1139{
Andreas Färberbdc44642013-06-24 23:50:24 +02001140 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001141
Alex Bligh40daca52013-08-21 16:03:02 +01001142 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
Andreas Färberbdc44642013-06-24 23:50:24 +02001143 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001144 cpu_resume(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001145 }
1146}
1147
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001148/* For temporary buffers for forming a name */
1149#define VCPU_THREAD_NAME_SIZE 16
1150
/* Attach a vCPU to the TCG execution thread.
 *
 * The first vCPU creates the shared thread and halt condition (and
 * waits on qemu_cpu_cond until the thread reports creation); later
 * vCPUs merely reuse the shared thread/condition.
 */
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    tcg_cpu_address_space_init(cpu, cpu->as);

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                 cpu->cpu_index);
        qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
                           cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        /* Needed by the SuspendThread-based kick on Windows. */
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}
1179
/* Create the dedicated thread for a KVM vCPU and wait (on
 * qemu_cpu_cond, dropping the BQL) until the thread reports that the
 * kernel vCPU has been created. */
static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
1195
/* Create the thread for a dummy (qtest) vCPU and wait until it has
 * announced its creation. */
static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}
1211
/* Public entry point for bringing up one vCPU: record SMP topology,
 * start the vCPU paused (machine start resumes it), and dispatch to
 * the accelerator-specific thread setup. */
void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}
1225
/* Stop the currently executing vCPU (no-op outside vCPU threads).
 * stop is cleared (the request is satisfied immediately) and stopped
 * set; cpu_exit() forces the execution loop out, and waiters on
 * qemu_pause_cond are notified. */
void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}
1235
/* Stop the VM, entering run state @state.
 *
 * From a vCPU thread we cannot synchronously stop all vCPUs (we are
 * one of them), so queue a vmstop request for the main loop and stop
 * only ourselves; returns 0 in that case.  From the I/O thread the
 * stop is performed directly via do_vm_stop(), whose result (e.g. a
 * block-flush error) is returned.
 */
int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}
1251
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03001252/* does a state transition even if the VM is already stopped,
1253 current state is forgotten forever */
Kevin Wolf56983462013-07-05 13:49:54 +02001254int vm_stop_force_state(RunState state)
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03001255{
1256 if (runstate_is_running()) {
Kevin Wolf56983462013-07-05 13:49:54 +02001257 return vm_stop(state);
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03001258 } else {
1259 runstate_set(state);
Kevin Wolf594a45c2013-07-18 14:52:19 +02001260 /* Make sure to return an error if the flush in a previous vm_stop()
1261 * failed. */
1262 return bdrv_flush_all();
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03001263 }
1264}
1265
/* Execute guest code for one vCPU under TCG; returns cpu_exec()'s
 * result (e.g. EXCP_DEBUG).
 *
 * With icount enabled, the instruction budget for this slice is
 * derived from the next QEMU_CLOCK_VIRTUAL deadline: the low 16 bits
 * go into icount_decr.u16.low (decremented by translated code) and the
 * remainder into icount_extra.  Afterwards any unexecuted budget is
 * folded back into timers_state.qemu_icount.
 */
static int tcg_cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int64_t deadline;
        int decr;
        /* Remove any leftover budget from the previous slice before
         * computing a fresh one. */
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                    + cpu->icount_extra);
        cpu->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        count = qemu_icount_round(deadline);
        timers_state.qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        cpu->icount_decr.u16.low = decr;
        cpu->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.
           (u32 = 0 clears u16.high — the exit request — as well.)  */
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                        + cpu->icount_extra);
        cpu->icount_decr.u32 = 0;
        cpu->icount_extra = 0;
    }
    return ret;
}
1317
/* One round-robin pass over all vCPUs on the TCG thread.
 *
 * Resumes from next_cpu (so a pass interrupted by exit_request
 * continues where it left off).  The virtual clock is disabled while
 * single-stepping with SSTEP_NOTIMER.  EXCP_DEBUG hands control to the
 * gdbstub and ends the pass; a stopped vCPU also ends it.
 */
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;
        CPUArchState *env = cpu->env_ptr;

        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}
1347
/* Print the target's supported CPU models to @f via @cpu_fprintf.
 * Only effective for targets that define cpu_list(); @optarg is not
 * used here. */
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03001355
/* QMP handler for "query-cpus": build a CpuInfoList describing every
 * vCPU (index, current flag, halted state, host thread id, and a
 * target-specific program counter field).  The caller owns the
 * returned list. */
CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
        /* Per-target env access for the PC-like fields below. */
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#endif

        /* Make sure the register state read below is up to date. */
        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001412
1413void qmp_memsave(int64_t addr, int64_t size, const char *filename,
1414 bool has_cpu, int64_t cpu_index, Error **errp)
1415{
1416 FILE *f;
1417 uint32_t l;
Andreas Färber55e5c282012-12-17 06:18:02 +01001418 CPUState *cpu;
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001419 uint8_t buf[1024];
1420
1421 if (!has_cpu) {
1422 cpu_index = 0;
1423 }
1424
Andreas Färber151d1322013-02-15 15:41:49 +01001425 cpu = qemu_get_cpu(cpu_index);
1426 if (cpu == NULL) {
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001427 error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
1428 "a CPU number");
1429 return;
1430 }
1431
1432 f = fopen(filename, "wb");
1433 if (!f) {
Luiz Capitulino618da852013-06-07 14:35:06 -04001434 error_setg_file_open(errp, errno, filename);
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001435 return;
1436 }
1437
1438 while (size != 0) {
1439 l = sizeof(buf);
1440 if (l > size)
1441 l = size;
Aneesh Kumar K.V2f4d0f52013-10-01 21:49:30 +05301442 if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
1443 error_setg(errp, "Invalid addr 0x%016" PRIx64 "specified", addr);
1444 goto exit;
1445 }
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001446 if (fwrite(buf, 1, l, f) != l) {
1447 error_set(errp, QERR_IO_ERROR);
1448 goto exit;
1449 }
1450 addr += l;
1451 size -= l;
1452 }
1453
1454exit:
1455 fclose(f);
1456}
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02001457
1458void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
1459 Error **errp)
1460{
1461 FILE *f;
1462 uint32_t l;
1463 uint8_t buf[1024];
1464
1465 f = fopen(filename, "wb");
1466 if (!f) {
Luiz Capitulino618da852013-06-07 14:35:06 -04001467 error_setg_file_open(errp, errno, filename);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02001468 return;
1469 }
1470
1471 while (size != 0) {
1472 l = sizeof(buf);
1473 if (l > size)
1474 l = size;
Stefan Weileb6282f2014-04-07 20:28:23 +02001475 cpu_physical_memory_read(addr, buf, l);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02001476 if (fwrite(buf, 1, l, f) != l) {
1477 error_set(errp, QERR_IO_ERROR);
1478 goto exit;
1479 }
1480 addr += l;
1481 size -= l;
1482 }
1483
1484exit:
1485 fclose(f);
1486}
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02001487
/*
 * qmp_inject_nmi: QMP 'inject-nmi' command.
 *
 * i386:  deliver an NMI to every vCPU — via the local APIC when one is
 *        present, otherwise by raising CPU_INTERRUPT_NMI directly.
 * s390x: restart the CPU currently selected in the monitor (the closest
 *        s390 equivalent of an NMI); sets QERR_UNSUPPORTED on failure.
 * Other targets: unsupported.
 */
void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);

        if (!cpu->apic_state) {
            /* No APIC device model (e.g. isapc): inject directly. */
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(cpu->apic_state);
        }
    }
#elif defined(TARGET_S390X)
    CPUState *cs;
    S390CPU *cpu;

    CPU_FOREACH(cs) {
        cpu = S390_CPU(cs);
        /* Only the CPU currently selected in the monitor is restarted. */
        if (cpu->env.cpu_num == monitor_get_cpu_index()) {
            if (s390_cpu_restart(S390_CPU(cs)) == -1) {
                error_set(errp, QERR_UNSUPPORTED);
                return;
            }
            break;
        }
    }
#else
    error_set(errp, QERR_UNSUPPORTED);
#endif
}