/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "qapi/qmp/qerror.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
#include "qapi-event.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

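/* A vCPU thread is considered idle when nothing forces it to run: no stop
 * request or queued work is pending, and either the whole VM is stopped or
 * the CPU is halted with no work pending.  When KVM handles halts in the
 * kernel, the thread is never treated as idle here.
 */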
static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

static int64_t vm_clock_warp_start = -1;
/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10

static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;

typedef struct TimersState {
    /* Protected by BQL. */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read out of BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;

    /* Compensate for varying guest execution speed. */
    int64_t qemu_icount_bias;
    /* Only written by TCG thread */
    int64_t qemu_icount;
} TimersState;

static TimersState timers_state;

/* Return the virtual CPU time, based on the instruction counter. */
static int64_t cpu_get_icount_locked(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = timers_state.qemu_icount;
    if (cpu) {
        if (!cpu_can_do_io(cpu)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
    }
    return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
}

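/* Lock-free wrapper around cpu_get_icount_locked(): it may be called outside
 * the BQL, so the value is read under the vm_clock seqlock and re-read if a
 * concurrent writer updated the timer state in the meantime.
 */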
int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}

int64_t cpu_icount_to_ns(int64_t icount)
{
    return icount << icount_time_shift;
}

/* return the host CPU cycle counter and handle stop/restart */
/* Caller must hold the BQL */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_real_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Note: non-increasing ticks may happen if the host uses
           software suspend. */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}

static int64_t cpu_get_clock_locked(void)
{
    int64_t ticks;

    ticks = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += get_clock();
    }

    return ticks;
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

/* enable cpu_get_ticks()
 * Caller must hold the BQL, which serves as the mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, the real thing protected by the seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* disable cpu_get_ticks(): the clock is stopped.  You must not call
 * cpu_get_ticks() after that.
 * Caller must hold the BQL, which serves as the mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, the real thing protected by the seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle, real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

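/* Compare virtual time (driven by the instruction counter) with the host
 * clock and nudge icount_time_shift so that the two advance at roughly the
 * same rate.  qemu_icount_bias is recomputed at the same time so that the
 * adjustment does not make QEMU_CLOCK_VIRTUAL jump.
 */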
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex. */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing. */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down. */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up. */
        icount_time_shift++;
    }
    last_delta = delta;
    timers_state.qemu_icount_bias = cur_icount
                              - (timers_state.qemu_icount << icount_time_shift);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

static void icount_warp_rt(void *opaque)
{
    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    if (atomic_read(&vm_clock_warp_start) == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        int64_t warp_delta;

        warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock_locked();
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = cur_time - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        timers_state.qemu_icount_bias += warp_delta;
    }
    vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        timers_state.qemu_icount_bias += warp;
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}

void qemu_clock_warp(QEMUClockType type)
{
    int64_t clock;
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
     * This ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest QEMU_CLOCK_VIRTUAL timer.
     */
    icount_warp_rt(NULL);
    timer_del(icount_warp_timer);
    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount. */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
         * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
         * event.  Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
         * after some "real" time (related to the time left until the next
         * event) has passed.  The QEMU_CLOCK_REALTIME timer will do this.
         * This keeps the warps from being visible externally; for example,
         * you will not be sending network packets continuously instead of
         * every 100ms.
         */
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
            vm_clock_warp_start = clock;
        }
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);
        timer_mod_anticipate(icount_warp_timer, clock + deadline);
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static bool icount_state_needed(void *opaque)
{
    return use_icount;
}

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &icount_vmstate_timers,
            .needed = icount_state_needed,
        }, {
            /* empty */
        }
    }
};

void configure_icount(QemuOpts *opts, Error **errp)
{
    const char *option;
    char *rem_str = NULL;

    seqlock_init(&timers_state.vm_clock_seqlock, NULL);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    option = qemu_opt_get(opts, "shift");
    if (!option) {
        if (qemu_opt_get(opts, "align") != NULL) {
            error_setg(errp, "Please specify shift option when using align");
        }
        return;
    }
    icount_align_option = qemu_opt_get_bool(opts, "align", false);
    icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
                                     icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        errno = 0;
        icount_time_shift = strtol(option, &rem_str, 0);
        if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
            error_setg(errp, "icount: Invalid shift value");
        }
        use_icount = 1;
        return;
    } else if (icount_align_option) {
        error_setg(errp, "shift=auto and align=on are incompatible");
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway. */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers. */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
                                   icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        qapi_event_send_stop(&error_abort);
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

static void cpu_signal(int sig)
{
    if (current_cpu) {
        cpu_exit(current_cpu);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

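/* Run func(data) on the vCPU @cpu and wait for it to finish.  When called
 * from @cpu's own thread the function is invoked directly; otherwise a work
 * item is queued on the target CPU (it can live on the caller's stack since
 * the caller blocks), the CPU is kicked, and the caller sleeps on
 * qemu_work_cond, dropping the global mutex while it waits.
 */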
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}

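/* Asynchronous variant of run_on_cpu(): the work item is heap-allocated,
 * queued on the target CPU and freed by flush_queued_work() after it has
 * run; the caller does not wait for completion.
 */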
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}

static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

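/* Housekeeping shared by all vCPU thread loops: acknowledge a pending stop
 * request, run any queued work items, and clear the thread-kicked flag.
 */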
static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle. */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}

static void tcg_exec_all(void);

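/* Body of the single TCG thread.  All TCG vCPUs are multiplexed onto this
 * one thread: it repeatedly runs a round-robin execution pass over the CPUs
 * (tcg_exec_all), lets QEMU_CLOCK_VIRTUAL timers fire when icount says they
 * are due, and then waits for more work in qemu_tcg_wait_io_event().
 */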
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (QTAILQ_FIRST(&cpus)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

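/* Force a vCPU thread out of its execution loop.  On POSIX hosts this sends
 * SIG_IPI to the thread; on Windows the thread is suspended, the exit
 * request is raised via cpu_signal(), and the thread is resumed again.
 */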
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);

    if (!current_cpu->thread_kicked) {
        qemu_cpu_kick_thread(current_cpu);
        current_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return 0;
        }
    }

    return 1;
}

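/* Ask every vCPU to stop and wait until all of them have acknowledged.
 * When invoked from a vCPU thread itself, the current CPU is stopped first;
 * without KVM the remaining CPUs can simply be marked as stopped because
 * they all run on this same TCG thread.
 */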
1120void pause_all_vcpus(void)
1121{
Andreas Färberbdc44642013-06-24 23:50:24 +02001122 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001123
Alex Bligh40daca52013-08-21 16:03:02 +01001124 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
Andreas Färberbdc44642013-06-24 23:50:24 +02001125 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001126 cpu->stop = true;
1127 qemu_cpu_kick(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001128 }
1129
Juan Quintelaaa723c22012-09-18 16:30:11 +02001130 if (qemu_in_vcpu_thread()) {
Jan Kiszkad798e972012-02-17 18:31:16 +01001131 cpu_stop_current();
1132 if (!kvm_enabled()) {
Andreas Färberbdc44642013-06-24 23:50:24 +02001133 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001134 cpu->stop = false;
1135 cpu->stopped = true;
Jan Kiszkad798e972012-02-17 18:31:16 +01001136 }
1137 return;
1138 }
1139 }
1140
Blue Swirl296af7c2010-03-29 19:23:50 +00001141 while (!all_vcpus_paused()) {
Paolo Bonzinibe7d6c52011-03-12 17:44:02 +01001142 qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
Andreas Färberbdc44642013-06-24 23:50:24 +02001143 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001144 qemu_cpu_kick(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001145 }
1146 }
1147}
1148
Igor Mammedov29936832013-04-23 10:29:37 +02001149void cpu_resume(CPUState *cpu)
1150{
1151 cpu->stop = false;
1152 cpu->stopped = false;
1153 qemu_cpu_kick(cpu);
1154}
1155
Blue Swirl296af7c2010-03-29 19:23:50 +00001156void resume_all_vcpus(void)
1157{
Andreas Färberbdc44642013-06-24 23:50:24 +02001158 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001159
Alex Bligh40daca52013-08-21 16:03:02 +01001160 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
Andreas Färberbdc44642013-06-24 23:50:24 +02001161 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001162 cpu_resume(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001163 }
1164}
1165
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001166/* For temporary buffers for forming a name */
1167#define VCPU_THREAD_NAME_SIZE 16
1168
Andreas Färbere5ab30a2012-05-03 01:50:44 +02001169static void qemu_tcg_init_vcpu(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001170{
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001171 char thread_name[VCPU_THREAD_NAME_SIZE];
1172
Edgar E. Iglesias09daed82013-12-17 13:06:51 +10001173 tcg_cpu_address_space_init(cpu, cpu->as);
1174
Blue Swirl296af7c2010-03-29 19:23:50 +00001175 /* share a single thread for all cpus with TCG */
1176 if (!tcg_cpu_thread) {
Andreas Färber814e6122012-05-02 17:00:37 +02001177 cpu->thread = g_malloc0(sizeof(QemuThread));
Andreas Färberf5c121b2012-05-03 01:22:49 +02001178 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1179 qemu_cond_init(cpu->halt_cond);
1180 tcg_halt_cond = cpu->halt_cond;
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001181 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
1182 cpu->cpu_index);
1183 qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
1184 cpu, QEMU_THREAD_JOINABLE);
Paolo Bonzini1ecf47b2011-12-13 13:43:52 +01001185#ifdef _WIN32
Andreas Färber814e6122012-05-02 17:00:37 +02001186 cpu->hThread = qemu_thread_get_handle(cpu->thread);
Paolo Bonzini1ecf47b2011-12-13 13:43:52 +01001187#endif
Andreas Färber61a46212012-05-02 22:49:36 +02001188 while (!cpu->created) {
Paolo Bonzini18a85722011-03-12 17:44:03 +01001189 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001190 }
Andreas Färber814e6122012-05-02 17:00:37 +02001191 tcg_cpu_thread = cpu->thread;
Blue Swirl296af7c2010-03-29 19:23:50 +00001192 } else {
Andreas Färber814e6122012-05-02 17:00:37 +02001193 cpu->thread = tcg_cpu_thread;
Andreas Färberf5c121b2012-05-03 01:22:49 +02001194 cpu->halt_cond = tcg_halt_cond;
Blue Swirl296af7c2010-03-29 19:23:50 +00001195 }
1196}
1197
Andreas Färber48a106b2013-05-27 02:20:39 +02001198static void qemu_kvm_start_vcpu(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001199{
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001200 char thread_name[VCPU_THREAD_NAME_SIZE];
1201
Andreas Färber814e6122012-05-02 17:00:37 +02001202 cpu->thread = g_malloc0(sizeof(QemuThread));
Andreas Färberf5c121b2012-05-03 01:22:49 +02001203 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1204 qemu_cond_init(cpu->halt_cond);
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001205 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
1206 cpu->cpu_index);
1207 qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
1208 cpu, QEMU_THREAD_JOINABLE);
Andreas Färber61a46212012-05-02 22:49:36 +02001209 while (!cpu->created) {
Paolo Bonzini18a85722011-03-12 17:44:03 +01001210 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001211 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001212}
1213
Andreas Färber10a90212013-05-27 02:24:35 +02001214static void qemu_dummy_start_vcpu(CPUState *cpu)
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001215{
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001216 char thread_name[VCPU_THREAD_NAME_SIZE];
1217
Andreas Färber814e6122012-05-02 17:00:37 +02001218 cpu->thread = g_malloc0(sizeof(QemuThread));
Andreas Färberf5c121b2012-05-03 01:22:49 +02001219 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1220 qemu_cond_init(cpu->halt_cond);
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001221 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
1222 cpu->cpu_index);
1223 qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001224 QEMU_THREAD_JOINABLE);
Andreas Färber61a46212012-05-02 22:49:36 +02001225 while (!cpu->created) {
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001226 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1227 }
1228}
1229
Andreas Färberc643bed2013-05-27 03:23:24 +02001230void qemu_init_vcpu(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001231{
Andreas Färberce3960e2012-12-17 03:27:07 +01001232 cpu->nr_cores = smp_cores;
1233 cpu->nr_threads = smp_threads;
Andreas Färberf324e762012-05-02 23:26:21 +02001234 cpu->stopped = true;
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001235 if (kvm_enabled()) {
Andreas Färber48a106b2013-05-27 02:20:39 +02001236 qemu_kvm_start_vcpu(cpu);
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001237 } else if (tcg_enabled()) {
Andreas Färbere5ab30a2012-05-03 01:50:44 +02001238 qemu_tcg_init_vcpu(cpu);
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001239 } else {
Andreas Färber10a90212013-05-27 02:24:35 +02001240 qemu_dummy_start_vcpu(cpu);
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001241 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001242}
1243
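/* Mark the vCPU running on this thread as stopped and force it out of its
 * execution loop via cpu_exit().  qemu_pause_cond is signalled so that a
 * thread waiting for all vCPUs to pause can make progress. */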
Jan Kiszkab4a3d962011-02-01 22:15:43 +01001244void cpu_stop_current(void)
Blue Swirl296af7c2010-03-29 19:23:50 +00001245{
Andreas Färber4917cf42013-05-27 05:17:50 +02001246 if (current_cpu) {
1247 current_cpu->stop = false;
1248 current_cpu->stopped = true;
1249 cpu_exit(current_cpu);
Paolo Bonzini67bb1722011-03-12 17:43:59 +01001250 qemu_cond_signal(&qemu_pause_cond);
Jan Kiszkab4a3d962011-02-01 22:15:43 +01001251 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001252}
1253
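/* Stop the guest and move the runstate to 'state'.  When called from a
 * vCPU thread the stop cannot complete synchronously: a vmstop request is
 * queued for the main loop, the current vCPU is stopped, and do_vm_stop()
 * runs later from the main thread. */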
Kevin Wolf56983462013-07-05 13:49:54 +02001254int vm_stop(RunState state)
Blue Swirl296af7c2010-03-29 19:23:50 +00001255{
Juan Quintelaaa723c22012-09-18 16:30:11 +02001256 if (qemu_in_vcpu_thread()) {
Paolo Bonzini74892d22014-06-05 14:53:58 +02001257 qemu_system_vmstop_request_prepare();
Luiz Capitulino1dfb4dd2011-07-29 14:26:33 -03001258 qemu_system_vmstop_request(state);
Blue Swirl296af7c2010-03-29 19:23:50 +00001259 /*
1260 * FIXME: should not return to device code in case
1261 * vm_stop() has been requested.
1262 */
Jan Kiszkab4a3d962011-02-01 22:15:43 +01001263 cpu_stop_current();
Kevin Wolf56983462013-07-05 13:49:54 +02001264 return 0;
Blue Swirl296af7c2010-03-29 19:23:50 +00001265 }
Kevin Wolf56983462013-07-05 13:49:54 +02001266
1267 return do_vm_stop(state);
Blue Swirl296af7c2010-03-29 19:23:50 +00001268}
1269
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03001270/* Does a state transition even if the VM is already stopped; the
1271   previous runstate is discarded. */
Kevin Wolf56983462013-07-05 13:49:54 +02001272int vm_stop_force_state(RunState state)
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03001273{
1274 if (runstate_is_running()) {
Kevin Wolf56983462013-07-05 13:49:54 +02001275 return vm_stop(state);
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03001276 } else {
1277 runstate_set(state);
Kevin Wolf594a45c2013-07-18 14:52:19 +02001278 /* Make sure to return an error if the flush in a previous vm_stop()
1279 * failed. */
1280 return bdrv_flush_all();
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03001281 }
1282}
1283
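/* Run one vCPU in TCG mode until it exits (interrupt, exception, icount
 * budget exhausted, ...).  With -icount, the instruction budget consumed
 * here is folded back into timers_state.qemu_icount so that the virtual
 * clock stays in sync with the number of executed instructions. */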
Andreas Färber9349b4f2012-03-14 01:38:32 +01001284static int tcg_cpu_exec(CPUArchState *env)
Blue Swirl296af7c2010-03-29 19:23:50 +00001285{
Andreas Färberefee7342013-08-26 05:39:29 +02001286 CPUState *cpu = ENV_GET_CPU(env);
Blue Swirl296af7c2010-03-29 19:23:50 +00001287 int ret;
1288#ifdef CONFIG_PROFILER
1289 int64_t ti;
1290#endif
1291
1292#ifdef CONFIG_PROFILER
1293 ti = profile_getclock();
1294#endif
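    /* With -icount, compute the instruction budget this vCPU may execute
     * before the next QEMU_CLOCK_VIRTUAL deadline.  The budget is split
     * into a 16-bit chunk (icount_decr.u16.low), decremented directly by
     * the generated code, plus icount_extra for the remainder. */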
1295 if (use_icount) {
1296 int64_t count;
Alex Blighac70aaf2013-08-21 16:02:57 +01001297 int64_t deadline;
Blue Swirl296af7c2010-03-29 19:23:50 +00001298 int decr;
KONRAD Fredericc96778b2014-08-01 01:37:09 +02001299 timers_state.qemu_icount -= (cpu->icount_decr.u16.low
1300 + cpu->icount_extra);
Andreas Färber28ecfd72013-08-26 05:51:49 +02001301 cpu->icount_decr.u16.low = 0;
Andreas Färberefee7342013-08-26 05:39:29 +02001302 cpu->icount_extra = 0;
Alex Bligh40daca52013-08-21 16:03:02 +01001303 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
Alex Blighac70aaf2013-08-21 16:02:57 +01001304
1305 /* Maintain prior (possibly buggy) behaviour where if no deadline
Alex Bligh40daca52013-08-21 16:03:02 +01001306 * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
Alex Blighac70aaf2013-08-21 16:02:57 +01001307 * INT32_MAX nanoseconds ahead, we still use INT32_MAX
1308 * nanoseconds.
1309 */
1310 if ((deadline < 0) || (deadline > INT32_MAX)) {
1311 deadline = INT32_MAX;
1312 }
1313
1314 count = qemu_icount_round(deadline);
KONRAD Fredericc96778b2014-08-01 01:37:09 +02001315 timers_state.qemu_icount += count;
Blue Swirl296af7c2010-03-29 19:23:50 +00001316 decr = (count > 0xffff) ? 0xffff : count;
1317 count -= decr;
Andreas Färber28ecfd72013-08-26 05:51:49 +02001318 cpu->icount_decr.u16.low = decr;
Andreas Färberefee7342013-08-26 05:39:29 +02001319 cpu->icount_extra = count;
Blue Swirl296af7c2010-03-29 19:23:50 +00001320 }
1321 ret = cpu_exec(env);
1322#ifdef CONFIG_PROFILER
1323 qemu_time += profile_getclock() - ti;
1324#endif
1325 if (use_icount) {
1326 /* Fold pending instructions back into the
1327 instruction counter, and clear the interrupt flag. */
KONRAD Fredericc96778b2014-08-01 01:37:09 +02001328 timers_state.qemu_icount -= (cpu->icount_decr.u16.low
1329 + cpu->icount_extra);
Andreas Färber28ecfd72013-08-26 05:51:49 +02001330 cpu->icount_decr.u32 = 0;
Andreas Färberefee7342013-08-26 05:39:29 +02001331 cpu->icount_extra = 0;
Blue Swirl296af7c2010-03-29 19:23:50 +00001332 }
1333 return ret;
1334}
1335
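/* Single-threaded TCG scheduler: round-robin over all vCPUs, starting
 * from where the previous call left off (next_cpu), until an exit is
 * requested or a vCPU stops or hits a debug exception. */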
Jan Kiszkabdb7ca62011-09-26 09:40:39 +02001336static void tcg_exec_all(void)
Blue Swirl296af7c2010-03-29 19:23:50 +00001337{
Jan Kiszka9a360852011-02-01 22:15:55 +01001338 int r;
1339
Alex Bligh40daca52013-08-21 16:03:02 +01001340 /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
1341 qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
Paolo Bonziniab33fcd2011-04-13 10:03:44 +02001342
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001343 if (next_cpu == NULL) {
Blue Swirl296af7c2010-03-29 19:23:50 +00001344 next_cpu = first_cpu;
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001345 }
Andreas Färberbdc44642013-06-24 23:50:24 +02001346 for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
Andreas Färber182735e2013-05-29 22:29:20 +02001347 CPUState *cpu = next_cpu;
1348 CPUArchState *env = cpu->env_ptr;
Blue Swirl296af7c2010-03-29 19:23:50 +00001349
Alex Bligh40daca52013-08-21 16:03:02 +01001350 qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
Andreas Färbered2803d2013-06-21 20:20:45 +02001351 (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
Blue Swirl296af7c2010-03-29 19:23:50 +00001352
Andreas Färbera1fcaa72012-05-02 23:42:26 +02001353 if (cpu_can_run(cpu)) {
Jan Kiszkabdb7ca62011-09-26 09:40:39 +02001354 r = tcg_cpu_exec(env);
Jan Kiszka9a360852011-02-01 22:15:55 +01001355 if (r == EXCP_DEBUG) {
Andreas Färber91325042013-05-27 02:07:49 +02001356 cpu_handle_guest_debug(cpu);
Jan Kiszka3c638d02010-06-25 16:56:56 +02001357 break;
1358 }
Andreas Färberf324e762012-05-02 23:26:21 +02001359 } else if (cpu->stop || cpu->stopped) {
Blue Swirl296af7c2010-03-29 19:23:50 +00001360 break;
1361 }
1362 }
Jan Kiszkac629a4b2010-06-25 16:56:52 +02001363 exit_request = 0;
Blue Swirl296af7c2010-03-29 19:23:50 +00001364}
1365
Stefan Weil9a78eea2010-10-22 23:03:33 +02001366void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
Blue Swirl262353c2010-05-04 19:55:35 +00001367{
1368 /* XXX: implement xxx_cpu_list for targets that still miss it */
Peter Maydelle916cbf2012-09-05 17:41:08 -03001369#if defined(cpu_list)
1370 cpu_list(f, cpu_fprintf);
Blue Swirl262353c2010-05-04 19:55:35 +00001371#endif
1372}
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03001373
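/* QMP handler for "query-cpus": build a CpuInfoList describing every vCPU
 * (index, halted state, thread id and a target-specific program counter).
 * An illustrative monitor invocation (for example only):
 *   { "execute": "query-cpus" }
 */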
1374CpuInfoList *qmp_query_cpus(Error **errp)
1375{
1376 CpuInfoList *head = NULL, *cur_item = NULL;
Andreas Färber182735e2013-05-29 22:29:20 +02001377 CPUState *cpu;
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03001378
Andreas Färberbdc44642013-06-24 23:50:24 +02001379 CPU_FOREACH(cpu) {
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03001380 CpuInfoList *info;
Andreas Färber182735e2013-05-29 22:29:20 +02001381#if defined(TARGET_I386)
1382 X86CPU *x86_cpu = X86_CPU(cpu);
1383 CPUX86State *env = &x86_cpu->env;
1384#elif defined(TARGET_PPC)
1385 PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
1386 CPUPPCState *env = &ppc_cpu->env;
1387#elif defined(TARGET_SPARC)
1388 SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
1389 CPUSPARCState *env = &sparc_cpu->env;
1390#elif defined(TARGET_MIPS)
1391 MIPSCPU *mips_cpu = MIPS_CPU(cpu);
1392 CPUMIPSState *env = &mips_cpu->env;
1393#endif
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03001394
Andreas Färbercb446ec2013-05-01 14:24:52 +02001395 cpu_synchronize_state(cpu);
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03001396
1397 info = g_malloc0(sizeof(*info));
1398 info->value = g_malloc0(sizeof(*info->value));
Andreas Färber55e5c282012-12-17 06:18:02 +01001399 info->value->CPU = cpu->cpu_index;
Andreas Färber182735e2013-05-29 22:29:20 +02001400 info->value->current = (cpu == first_cpu);
Andreas Färber259186a2013-01-17 18:51:17 +01001401 info->value->halted = cpu->halted;
Andreas Färber9f09e182012-05-03 06:59:07 +02001402 info->value->thread_id = cpu->thread_id;
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03001403#if defined(TARGET_I386)
1404 info->value->has_pc = true;
1405 info->value->pc = env->eip + env->segs[R_CS].base;
1406#elif defined(TARGET_PPC)
1407 info->value->has_nip = true;
1408 info->value->nip = env->nip;
1409#elif defined(TARGET_SPARC)
1410 info->value->has_pc = true;
1411 info->value->pc = env->pc;
1412 info->value->has_npc = true;
1413 info->value->npc = env->npc;
1414#elif defined(TARGET_MIPS)
1415 info->value->has_PC = true;
1416 info->value->PC = env->active_tc.PC;
1417#endif
1418
1419 /* XXX: waiting for the qapi to support GSList */
1420 if (!cur_item) {
1421 head = cur_item = info;
1422 } else {
1423 cur_item->next = info;
1424 cur_item = info;
1425 }
1426 }
1427
1428 return head;
1429}
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001430
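/* QMP handler for "memsave": write 'size' bytes of guest *virtual* memory,
 * read through the selected vCPU starting at 'addr', into 'filename'.
 * Illustrative invocation (argument names assumed from the QAPI schema):
 *   { "execute": "memsave",
 *     "arguments": { "val": 4096, "size": 256, "filename": "/tmp/virt.bin" } }
 */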
1431void qmp_memsave(int64_t addr, int64_t size, const char *filename,
1432 bool has_cpu, int64_t cpu_index, Error **errp)
1433{
1434 FILE *f;
1435 uint32_t l;
Andreas Färber55e5c282012-12-17 06:18:02 +01001436 CPUState *cpu;
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001437 uint8_t buf[1024];
1438
1439 if (!has_cpu) {
1440 cpu_index = 0;
1441 }
1442
Andreas Färber151d1322013-02-15 15:41:49 +01001443 cpu = qemu_get_cpu(cpu_index);
1444 if (cpu == NULL) {
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001445 error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
1446 "a CPU number");
1447 return;
1448 }
1449
1450 f = fopen(filename, "wb");
1451 if (!f) {
Luiz Capitulino618da852013-06-07 14:35:06 -04001452 error_setg_file_open(errp, errno, filename);
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001453 return;
1454 }
1455
1456 while (size != 0) {
1457 l = sizeof(buf);
1458 if (l > size)
1459 l = size;
Aneesh Kumar K.V2f4d0f52013-10-01 21:49:30 +05301460 if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
1461             error_setg(errp, "Invalid addr 0x%016" PRIx64 " specified", addr);
1462 goto exit;
1463 }
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001464 if (fwrite(buf, 1, l, f) != l) {
1465 error_set(errp, QERR_IO_ERROR);
1466 goto exit;
1467 }
1468 addr += l;
1469 size -= l;
1470 }
1471
1472exit:
1473 fclose(f);
1474}
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02001475
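/* QMP handler for "pmemsave": like memsave, but reads guest *physical*
 * memory via cpu_physical_memory_read(), so no vCPU needs to be selected.
 * Illustrative invocation (argument names assumed from the QAPI schema):
 *   { "execute": "pmemsave",
 *     "arguments": { "val": 1048576, "size": 512, "filename": "/tmp/phys.bin" } }
 */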
1476void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
1477 Error **errp)
1478{
1479 FILE *f;
1480 uint32_t l;
1481 uint8_t buf[1024];
1482
1483 f = fopen(filename, "wb");
1484 if (!f) {
Luiz Capitulino618da852013-06-07 14:35:06 -04001485 error_setg_file_open(errp, errno, filename);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02001486 return;
1487 }
1488
1489 while (size != 0) {
1490 l = sizeof(buf);
1491 if (l > size)
1492 l = size;
Stefan Weileb6282f2014-04-07 20:28:23 +02001493 cpu_physical_memory_read(addr, buf, l);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02001494 if (fwrite(buf, 1, l, f) != l) {
1495 error_set(errp, QERR_IO_ERROR);
1496 goto exit;
1497 }
1498 addr += l;
1499 size -= l;
1500 }
1501
1502exit:
1503 fclose(f);
1504}
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02001505
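/* QMP handler for "inject-nmi": on x86, deliver an NMI to every vCPU
 * (through the APIC when one is present); on s390x, restart the CPU
 * currently selected in the monitor.  Other targets report the command
 * as unsupported.  Illustrative invocation: { "execute": "inject-nmi" } */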
1506void qmp_inject_nmi(Error **errp)
1507{
1508#if defined(TARGET_I386)
Andreas Färber182735e2013-05-29 22:29:20 +02001509 CPUState *cs;
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02001510
Andreas Färberbdc44642013-06-24 23:50:24 +02001511 CPU_FOREACH(cs) {
Andreas Färber182735e2013-05-29 22:29:20 +02001512 X86CPU *cpu = X86_CPU(cs);
Andreas Färber182735e2013-05-29 22:29:20 +02001513
Chen Fan02e51482013-12-23 17:04:02 +08001514 if (!cpu->apic_state) {
Andreas Färber182735e2013-05-29 22:29:20 +02001515 cpu_interrupt(cs, CPU_INTERRUPT_NMI);
Jan Kiszka02c09192011-10-18 00:00:06 +08001516 } else {
Chen Fan02e51482013-12-23 17:04:02 +08001517 apic_deliver_nmi(cpu->apic_state);
Jan Kiszka02c09192011-10-18 00:00:06 +08001518 }
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02001519 }
Eugene (jno) Dvurechenski7f7f9752012-12-05 15:50:07 +01001520#elif defined(TARGET_S390X)
1521 CPUState *cs;
1522 S390CPU *cpu;
1523
Andreas Färberbdc44642013-06-24 23:50:24 +02001524 CPU_FOREACH(cs) {
Eugene (jno) Dvurechenski7f7f9752012-12-05 15:50:07 +01001525 cpu = S390_CPU(cs);
1526 if (cpu->env.cpu_num == monitor_get_cpu_index()) {
1527 if (s390_cpu_restart(S390_CPU(cs)) == -1) {
1528 error_set(errp, QERR_UNSUPPORTED);
1529 return;
1530 }
1531 break;
1532 }
1533 }
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02001534#else
1535 error_set(errp, QERR_UNSUPPORTED);
1536#endif
1537}