/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || qemu_cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

/* Compensate for varying guest execution speed. */
static int64_t qemu_icount_bias;
static int64_t vm_clock_warp_start;
/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10

/* Only written by TCG thread */
static int64_t qemu_icount;

static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;

typedef struct TimersState {
    /* Protected by BQL. */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read outside of the BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

static TimersState timers_state;

/* Return the virtual CPU time, based on the instruction counter. */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = qemu_icount;
    if (cpu) {
        CPUArchState *env = cpu->env_ptr;
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

/* return the host CPU cycle counter and handle stop/restart */
/* Caller must hold the BQL */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non increasing ticks may happen if the host uses
               software suspend */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}

static int64_t cpu_get_clock_locked(void)
{
    int64_t ti;

    if (!timers_state.cpu_ticks_enabled) {
        ti = timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        ti += timers_state.cpu_clock_offset;
    }

    return ti;
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

/* enable cpu_get_ticks()
 * Caller must hold the BQL, which serves as the mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, what the seqlock actually protects is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* disable cpu_get_ticks(): the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold the BQL, which serves as the mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, what the seqlock actually protects is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

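/* Retune icount_time_shift so that virtual (instruction-counter) time keeps
 * tracking real time: slow the virtual clock when the guest runs ahead of
 * real time, and speed it up when the guest falls behind.
 */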
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex. */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing. */
    if (!runstate_is_running()) {
        return;
    }

    cur_time = cpu_get_clock();
    cur_icount = cpu_get_icount();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead. Slow time down. */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind. Speed time up. */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}

static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

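/* Timer callback, also invoked from qemu_clock_warp(): fold the real time
 * that has elapsed since vm_clock_warp_start into qemu_icount_bias, so that
 * QEMU_CLOCK_VIRTUAL catches up on time spent with all vCPUs idle.
 */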
static void icount_warp_rt(void *opaque)
{
    if (vm_clock_warp_start == -1) {
        return;
    }

    if (runstate_is_running()) {
        int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        int64_t warp_delta;

        warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock();
            int64_t cur_icount = cpu_get_icount();
            int64_t delta = cur_time - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        qemu_icount_bias += warp_delta;
    }
    vm_clock_warp_start = -1;

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = MIN(dest - clock, deadline);
        qemu_icount_bias += warp;
        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}

void qemu_clock_warp(QEMUClockType type)
{
    int64_t clock;
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
     * This ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest QEMU_CLOCK_VIRTUAL timer.
     */
    icount_warp_rt(NULL);
    timer_del(icount_warp_timer);
    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount. */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
         * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
         * event.  Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
         * after some "real" time (related to the time left until the next
         * event) has passed.  The QEMU_CLOCK_REALTIME timer will do this.
         * This prevents the warps from being visible externally; for example,
         * you will not be sending network packets continuously instead of
         * every 100ms.
         */
        if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
            vm_clock_warp_start = clock;
        }
        timer_mod_anticipate(icount_warp_timer, clock + deadline);
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};

void configure_icount(const char *option)
{
    seqlock_init(&timers_state.vm_clock_seqlock, NULL);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option) {
        return;
    }

    icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
                                     icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway. */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers. */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
                                   icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

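/* Pause all vCPUs, move to the requested run state and flush block devices.
 * Returns the result of bdrv_flush_all() so callers can report I/O errors.
 */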
static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        monitor_protocol_event(QEVENT_STOP, NULL);
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

static void cpu_signal(int sig)
{
    if (current_cpu) {
        cpu_exit(current_cpu);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

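/* Run func(data) on the given vCPU's thread and wait for it to complete.
 * If called from that vCPU's own thread, func is invoked directly; otherwise
 * the work item is queued and the caller sleeps on qemu_work_cond (releasing
 * the global mutex) until the target thread has run it.
 */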
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}

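/* Asynchronous variant of run_on_cpu(): queue func(data) for the given vCPU
 * and return immediately.  The heap-allocated work item is freed by the
 * target thread in flush_queued_work().
 */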
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}

static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle. */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}

static void tcg_exec_all(void);

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (QTAILQ_FIRST(&cpus)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

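/* Force the vCPU thread out of its execution loop: on POSIX hosts by sending
 * SIG_IPI, on Windows by suspending the TCG thread, calling cpu_signal() on
 * its behalf, and resuming it.
 */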
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);

    if (!current_cpu->thread_kicked) {
        qemu_cpu_kick_thread(current_cpu);
        current_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return 0;
        }
    }

    return 1;
}

void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}

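/* With this single-threaded TCG model every emulated CPU runs on one shared
 * host thread, so the thread and its halt condition are created for the
 * first vCPU only and reused for the rest.
 */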
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, cpu,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}

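/* Execute one TCG slice for this CPU.  In icount mode, first budget the
 * number of instructions allowed to run from the nearest QEMU_CLOCK_VIRTUAL
 * deadline, then fold any unexecuted instructions back into the counter
 * when cpu_exec() returns.
 */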
static int tcg_cpu_exec(CPUArchState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int64_t deadline;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        count = qemu_icount_round(deadline);
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag. */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

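/* Round-robin over the vCPUs, giving each one an execution slice, until all
 * have run, an exit is requested, or a CPU stops or reports a debug event.
 */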
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;
        CPUArchState *env = cpu->env_ptr;

        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}

void set_numa_modes(void)
{
    CPUState *cpu;
    int i;

    CPU_FOREACH(cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (test_bit(cpu->cpu_index, node_cpumask[i])) {
                cpu->numa_node = i;
            }
        }
    }
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}

CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}

void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                  "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        cpu_memory_rw_debug(cpu, addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        cpu_physical_memory_rw(addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);
        CPUX86State *env = &cpu->env;

        if (!env->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(env->apic_state);
        }
    }
#elif defined(TARGET_S390X)
    CPUState *cs;
    S390CPU *cpu;

    CPU_FOREACH(cs) {
        cpu = S390_CPU(cs);
        if (cpu->env.cpu_num == monitor_get_cpu_index()) {
            if (s390_cpu_restart(S390_CPU(cs)) == -1) {
                error_set(errp, QERR_UNSUPPORTED);
                return;
            }
            break;
        }
    }
#else
    error_set(errp, QERR_UNSUPPORTED);
#endif
}