blob: dd7ac136215170bc17d3a6aef9a3d32b7c9dff71 [file] [log] [blame]
Blue Swirl296af7c2010-03-29 19:23:50 +00001/*
2 * QEMU System Emulator
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25/* Needed early for CONFIG_BSD etc. */
26#include "config-host.h"
27
Paolo Bonzini83c90892012-12-17 18:19:49 +010028#include "monitor/monitor.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010029#include "sysemu/sysemu.h"
Paolo Bonzini022c62c2012-12-17 18:19:49 +010030#include "exec/gdbstub.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010031#include "sysemu/dma.h"
32#include "sysemu/kvm.h"
Luiz Capitulinode0b36b2011-09-21 16:38:35 -030033#include "qmp-commands.h"
Blue Swirl296af7c2010-03-29 19:23:50 +000034
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010035#include "qemu/thread.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010036#include "sysemu/cpus.h"
37#include "sysemu/qtest.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010038#include "qemu/main-loop.h"
39#include "qemu/bitmap.h"
Liu Ping Fancb365642013-09-25 14:20:58 +080040#include "qemu/seqlock.h"
Jan Kiszka0ff0fc12011-06-23 10:15:55 +020041
42#ifndef _WIN32
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010043#include "qemu/compatfd.h"
Jan Kiszka0ff0fc12011-06-23 10:15:55 +020044#endif
Blue Swirl296af7c2010-03-29 19:23:50 +000045
Jan Kiszka6d9cb732011-02-01 22:15:58 +010046#ifdef CONFIG_LINUX
47
48#include <sys/prctl.h>
49
Marcelo Tosattic0532a72010-10-11 15:31:21 -030050#ifndef PR_MCE_KILL
51#define PR_MCE_KILL 33
52#endif
53
Jan Kiszka6d9cb732011-02-01 22:15:58 +010054#ifndef PR_MCE_KILL_SET
55#define PR_MCE_KILL_SET 1
56#endif
57
58#ifndef PR_MCE_KILL_EARLY
59#define PR_MCE_KILL_EARLY 1
60#endif
61
62#endif /* CONFIG_LINUX */
63
Andreas Färber182735e2013-05-29 22:29:20 +020064static CPUState *next_cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +000065
Tiejun Chen321bc0b2013-08-02 09:43:09 +080066bool cpu_is_stopped(CPUState *cpu)
67{
68 return cpu->stopped || !runstate_is_running();
69}
70
Andreas Färbera98ae1d2013-05-26 23:21:08 +020071static bool cpu_thread_is_idle(CPUState *cpu)
Peter Maydellac873f12012-07-19 16:52:27 +010072{
Andreas Färberc64ca812012-05-03 02:11:45 +020073 if (cpu->stop || cpu->queued_work_first) {
Peter Maydellac873f12012-07-19 16:52:27 +010074 return false;
75 }
Tiejun Chen321bc0b2013-08-02 09:43:09 +080076 if (cpu_is_stopped(cpu)) {
Peter Maydellac873f12012-07-19 16:52:27 +010077 return true;
78 }
Andreas Färber8c2e1b02013-08-25 18:53:55 +020079 if (!cpu->halted || cpu_has_work(cpu) ||
Alexander Graf215e79c2013-04-24 22:24:12 +020080 kvm_halt_in_kernel()) {
Peter Maydellac873f12012-07-19 16:52:27 +010081 return false;
82 }
83 return true;
84}
85
86static bool all_cpu_threads_idle(void)
87{
Andreas Färber182735e2013-05-29 22:29:20 +020088 CPUState *cpu;
Peter Maydellac873f12012-07-19 16:52:27 +010089
Andreas Färberbdc44642013-06-24 23:50:24 +020090 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +020091 if (!cpu_thread_is_idle(cpu)) {
Peter Maydellac873f12012-07-19 16:52:27 +010092 return false;
93 }
94 }
95 return true;
96}
97
Blue Swirl296af7c2010-03-29 19:23:50 +000098/***********************************************************/
Paolo Bonzini946fb272011-09-12 13:57:37 +020099/* guest cycle counter */
100
Paolo Bonzinia3270e12013-10-07 17:18:15 +0200101/* Protected by TimersState seqlock */
102
103/* Compensate for varying guest execution speed. */
104static int64_t qemu_icount_bias;
105static int64_t vm_clock_warp_start;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200106/* Conversion factor from emulated instructions to virtual clock ticks. */
107static int icount_time_shift;
108/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
109#define MAX_ICOUNT_SHIFT 10
Paolo Bonzinia3270e12013-10-07 17:18:15 +0200110
111/* Only written by TCG thread */
112static int64_t qemu_icount;
113
Paolo Bonzini946fb272011-09-12 13:57:37 +0200114static QEMUTimer *icount_rt_timer;
115static QEMUTimer *icount_vm_timer;
116static QEMUTimer *icount_warp_timer;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200117
118typedef struct TimersState {
Liu Ping Fancb365642013-09-25 14:20:58 +0800119 /* Protected by BQL. */
Paolo Bonzini946fb272011-09-12 13:57:37 +0200120 int64_t cpu_ticks_prev;
121 int64_t cpu_ticks_offset;
Liu Ping Fancb365642013-09-25 14:20:58 +0800122
123 /* cpu_clock_offset can be read out of BQL, so protect it with
124 * this lock.
125 */
126 QemuSeqLock vm_clock_seqlock;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200127 int64_t cpu_clock_offset;
128 int32_t cpu_ticks_enabled;
129 int64_t dummy;
130} TimersState;
131
Liu Ping Fand9cd4002013-07-21 08:43:00 +0000132static TimersState timers_state;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200133
134/* Return the virtual CPU time, based on the instruction counter. */
Paolo Bonzini17a15f12013-10-03 15:17:25 +0200135static int64_t cpu_get_icount_locked(void)
Paolo Bonzini946fb272011-09-12 13:57:37 +0200136{
137 int64_t icount;
Andreas Färber4917cf42013-05-27 05:17:50 +0200138 CPUState *cpu = current_cpu;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200139
140 icount = qemu_icount;
Andreas Färber4917cf42013-05-27 05:17:50 +0200141 if (cpu) {
Andreas Färber99df7dc2013-08-26 05:15:23 +0200142 if (!cpu_can_do_io(cpu)) {
Paolo Bonzini946fb272011-09-12 13:57:37 +0200143 fprintf(stderr, "Bad clock read\n");
144 }
Andreas Färber28ecfd72013-08-26 05:51:49 +0200145 icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
Paolo Bonzini946fb272011-09-12 13:57:37 +0200146 }
147 return qemu_icount_bias + (icount << icount_time_shift);
148}
149
Paolo Bonzini17a15f12013-10-03 15:17:25 +0200150int64_t cpu_get_icount(void)
151{
152 int64_t icount;
153 unsigned start;
154
155 do {
156 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
157 icount = cpu_get_icount_locked();
158 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
159
160 return icount;
161}
162
Paolo Bonzini946fb272011-09-12 13:57:37 +0200163/* return the host CPU cycle counter and handle stop/restart */
Liu Ping Fancb365642013-09-25 14:20:58 +0800164/* Caller must hold the BQL */
Paolo Bonzini946fb272011-09-12 13:57:37 +0200165int64_t cpu_get_ticks(void)
166{
Paolo Bonzini5f3e3102013-10-28 17:32:18 +0100167 int64_t ticks;
168
Paolo Bonzini946fb272011-09-12 13:57:37 +0200169 if (use_icount) {
170 return cpu_get_icount();
171 }
Paolo Bonzini5f3e3102013-10-28 17:32:18 +0100172
173 ticks = timers_state.cpu_ticks_offset;
174 if (timers_state.cpu_ticks_enabled) {
175 ticks += cpu_get_real_ticks();
Paolo Bonzini946fb272011-09-12 13:57:37 +0200176 }
Paolo Bonzini5f3e3102013-10-28 17:32:18 +0100177
178 if (timers_state.cpu_ticks_prev > ticks) {
179 /* Note: non increasing ticks may happen if the host uses
180 software suspend */
181 timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
182 ticks = timers_state.cpu_ticks_prev;
183 }
184
185 timers_state.cpu_ticks_prev = ticks;
186 return ticks;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200187}
188
Liu Ping Fancb365642013-09-25 14:20:58 +0800189static int64_t cpu_get_clock_locked(void)
190{
Paolo Bonzini5f3e3102013-10-28 17:32:18 +0100191 int64_t ticks;
Liu Ping Fancb365642013-09-25 14:20:58 +0800192
Paolo Bonzini5f3e3102013-10-28 17:32:18 +0100193 ticks = timers_state.cpu_clock_offset;
194 if (timers_state.cpu_ticks_enabled) {
195 ticks += get_clock();
Liu Ping Fancb365642013-09-25 14:20:58 +0800196 }
197
Paolo Bonzini5f3e3102013-10-28 17:32:18 +0100198 return ticks;
Liu Ping Fancb365642013-09-25 14:20:58 +0800199}
200
Paolo Bonzini946fb272011-09-12 13:57:37 +0200201/* return the host CPU monotonic timer and handle stop/restart */
202int64_t cpu_get_clock(void)
203{
204 int64_t ti;
Liu Ping Fancb365642013-09-25 14:20:58 +0800205 unsigned start;
206
207 do {
208 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
209 ti = cpu_get_clock_locked();
210 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
211
212 return ti;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200213}
214
/* enable cpu_get_ticks()
 * Caller must hold the BQL, which serves as the mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* What the seqlock really protects here is cpu_clock_offset, which
     * lock-free readers access via cpu_get_clock(). */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        /* Rebase the offsets so (offset + current counter) resumes exactly
         * where the clocks stopped. */
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
229
/* disable cpu_get_ticks() : the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold the BQL, which serves as the mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* What the seqlock really protects here is cpu_clock_offset, which
     * lock-free readers access via cpu_get_clock(). */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        /* Freeze both clocks: bake the current readings into the offsets
         * so subsequent reads return the stopped value. */
        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
245
246/* Correlation between real and virtual time is always going to be
247 fairly approximate, so ignore small variation.
248 When the guest is idle real and virtual time will be aligned in
249 the IO wait loop. */
250#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)
251
/* Re-tune icount_time_shift so that instruction-derived virtual time
 * tracks real time, then fold the correction into qemu_icount_bias.
 */
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex.  */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    /* Re-anchor the bias so the new shift does not cause a time jump.  */
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
288
289static void icount_adjust_rt(void *opaque)
290{
Alex Bligh40daca52013-08-21 16:03:02 +0100291 timer_mod(icount_rt_timer,
292 qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
Paolo Bonzini946fb272011-09-12 13:57:37 +0200293 icount_adjust();
294}
295
296static void icount_adjust_vm(void *opaque)
297{
Alex Bligh40daca52013-08-21 16:03:02 +0100298 timer_mod(icount_vm_timer,
299 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
300 get_ticks_per_sec() / 10);
Paolo Bonzini946fb272011-09-12 13:57:37 +0200301 icount_adjust();
302}
303
304static int64_t qemu_icount_round(int64_t count)
305{
306 return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
307}
308
/* Apply an in-progress clock warp: advance qemu_icount_bias by the real
 * time elapsed since vm_clock_warp_start, then clear the warp.
 */
static void icount_warp_rt(void *opaque)
{
    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    if (atomic_read(&vm_clock_warp_start) == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        int64_t warp_delta;

        warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock_locked();
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = cur_time - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        qemu_icount_bias += warp_delta;
    }
    vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);

    /* The warp may have made virtual-clock timers runnable.  */
    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
343
Paolo Bonzini8156be52012-03-28 15:42:04 +0200344void qtest_clock_warp(int64_t dest)
345{
Alex Bligh40daca52013-08-21 16:03:02 +0100346 int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
Paolo Bonzini8156be52012-03-28 15:42:04 +0200347 assert(qtest_enabled());
348 while (clock < dest) {
Alex Bligh40daca52013-08-21 16:03:02 +0100349 int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
Paolo Bonzini8156be52012-03-28 15:42:04 +0200350 int64_t warp = MIN(dest - clock, deadline);
Paolo Bonzini17a15f12013-10-03 15:17:25 +0200351 seqlock_write_lock(&timers_state.vm_clock_seqlock);
Paolo Bonzini8156be52012-03-28 15:42:04 +0200352 qemu_icount_bias += warp;
Paolo Bonzini17a15f12013-10-03 15:17:25 +0200353 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
354
Alex Bligh40daca52013-08-21 16:03:02 +0100355 qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
356 clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
Paolo Bonzini8156be52012-03-28 15:42:04 +0200357 }
Alex Bligh40daca52013-08-21 16:03:02 +0100358 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
Paolo Bonzini8156be52012-03-28 15:42:04 +0200359}
360
/* If all vCPUs are idle in icount mode, schedule a "warp" that will jump
 * QEMU_CLOCK_VIRTUAL forward to the next timer deadline instead of
 * burning real time while the guest sleeps.
 */
void qemu_clock_warp(QEMUClockType type)
{
    int64_t clock;
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
     * This ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest QEMU_CLOCK_VIRTUAL timer.
     */
    icount_warp_rt(NULL);
    timer_del(icount_warp_timer);
    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        /* No virtual-clock timer is pending at all; nothing to warp to.  */
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
         * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
         * event.  Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
         * after some "real" time (related to the time left until the next
         * event) has passed.  The QEMU_CLOCK_REALTIME timer will do this.
         * This avoids that the warps are visible externally; for example,
         * you will not be sending network packets continuously instead of
         * every 100ms.
         */
        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
            vm_clock_warp_start = clock;
        }
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);
        timer_mod_anticipate(icount_warp_timer, clock + deadline);
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}
428
/* Migration format for TimersState.  Version 2 adds cpu_clock_offset
 * (see the _V field below); minimum_version_id 1 keeps older streams
 * loadable.  NOTE(review): "dummy" appears to hold the wire slot of a
 * retired field -- verify against migration history before touching.
 */
static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};
440
441void configure_icount(const char *option)
442{
Liu Ping Fancb365642013-09-25 14:20:58 +0800443 seqlock_init(&timers_state.vm_clock_seqlock, NULL);
Paolo Bonzini946fb272011-09-12 13:57:37 +0200444 vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
445 if (!option) {
446 return;
447 }
448
Alex Bligh40daca52013-08-21 16:03:02 +0100449 icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
450 icount_warp_rt, NULL);
Paolo Bonzini946fb272011-09-12 13:57:37 +0200451 if (strcmp(option, "auto") != 0) {
452 icount_time_shift = strtol(option, NULL, 0);
453 use_icount = 1;
454 return;
455 }
456
457 use_icount = 2;
458
459 /* 125MIPS seems a reasonable initial guess at the guest speed.
460 It will be corrected fairly quickly anyway. */
461 icount_time_shift = 3;
462
463 /* Have both realtime and virtual time triggers for speed adjustment.
464 The realtime trigger catches emulated time passing too slowly,
465 the virtual time trigger catches emulated time passing too fast.
466 Realtime triggers occur even when idle, so use them less frequently
467 than VM triggers. */
Alex Bligh40daca52013-08-21 16:03:02 +0100468 icount_rt_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
469 icount_adjust_rt, NULL);
470 timer_mod(icount_rt_timer,
471 qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
472 icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
473 icount_adjust_vm, NULL);
474 timer_mod(icount_vm_timer,
475 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
476 get_ticks_per_sec() / 10);
Paolo Bonzini946fb272011-09-12 13:57:37 +0200477}
478
479/***********************************************************/
Blue Swirl296af7c2010-03-29 19:23:50 +0000480void hw_error(const char *fmt, ...)
481{
482 va_list ap;
Andreas Färber55e5c282012-12-17 06:18:02 +0100483 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +0000484
485 va_start(ap, fmt);
486 fprintf(stderr, "qemu: hardware error: ");
487 vfprintf(stderr, fmt, ap);
488 fprintf(stderr, "\n");
Andreas Färberbdc44642013-06-24 23:50:24 +0200489 CPU_FOREACH(cpu) {
Andreas Färber55e5c282012-12-17 06:18:02 +0100490 fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
Andreas Färber878096e2013-05-27 01:33:50 +0200491 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
Blue Swirl296af7c2010-03-29 19:23:50 +0000492 }
493 va_end(ap);
494 abort();
495}
496
497void cpu_synchronize_all_states(void)
498{
Andreas Färber182735e2013-05-29 22:29:20 +0200499 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +0000500
Andreas Färberbdc44642013-06-24 23:50:24 +0200501 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +0200502 cpu_synchronize_state(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +0000503 }
504}
505
506void cpu_synchronize_all_post_reset(void)
507{
Andreas Färber182735e2013-05-29 22:29:20 +0200508 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +0000509
Andreas Färberbdc44642013-06-24 23:50:24 +0200510 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +0200511 cpu_synchronize_post_reset(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +0000512 }
513}
514
515void cpu_synchronize_all_post_init(void)
516{
Andreas Färber182735e2013-05-29 22:29:20 +0200517 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +0000518
Andreas Färberbdc44642013-06-24 23:50:24 +0200519 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +0200520 cpu_synchronize_post_init(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +0000521 }
522}
523
Kevin Wolf56983462013-07-05 13:49:54 +0200524static int do_vm_stop(RunState state)
Blue Swirl296af7c2010-03-29 19:23:50 +0000525{
Kevin Wolf56983462013-07-05 13:49:54 +0200526 int ret = 0;
527
Luiz Capitulino13548692011-07-29 15:36:43 -0300528 if (runstate_is_running()) {
Blue Swirl296af7c2010-03-29 19:23:50 +0000529 cpu_disable_ticks();
Blue Swirl296af7c2010-03-29 19:23:50 +0000530 pause_all_vcpus();
Luiz Capitulinof5bbfba2011-07-29 15:04:45 -0300531 runstate_set(state);
Luiz Capitulino1dfb4dd2011-07-29 14:26:33 -0300532 vm_state_notify(0, state);
Blue Swirl296af7c2010-03-29 19:23:50 +0000533 monitor_protocol_event(QEVENT_STOP, NULL);
534 }
Kevin Wolf56983462013-07-05 13:49:54 +0200535
Kevin Wolf594a45c2013-07-18 14:52:19 +0200536 bdrv_drain_all();
537 ret = bdrv_flush_all();
538
Kevin Wolf56983462013-07-05 13:49:54 +0200539 return ret;
Blue Swirl296af7c2010-03-29 19:23:50 +0000540}
541
Andreas Färbera1fcaa72012-05-02 23:42:26 +0200542static bool cpu_can_run(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +0000543{
Andreas Färber4fdeee72012-05-02 23:10:09 +0200544 if (cpu->stop) {
Andreas Färbera1fcaa72012-05-02 23:42:26 +0200545 return false;
Jan Kiszka0ab07c62011-02-07 12:19:14 +0100546 }
Tiejun Chen321bc0b2013-08-02 09:43:09 +0800547 if (cpu_is_stopped(cpu)) {
Andreas Färbera1fcaa72012-05-02 23:42:26 +0200548 return false;
Jan Kiszka0ab07c62011-02-07 12:19:14 +0100549 }
Andreas Färbera1fcaa72012-05-02 23:42:26 +0200550 return true;
Blue Swirl296af7c2010-03-29 19:23:50 +0000551}
552
Andreas Färber91325042013-05-27 02:07:49 +0200553static void cpu_handle_guest_debug(CPUState *cpu)
Jan Kiszka3c638d02010-06-25 16:56:56 +0200554{
Andreas Färber64f6b342013-05-27 02:06:09 +0200555 gdb_set_stop_cpu(cpu);
Jan Kiszka8cf71712011-02-07 12:19:16 +0100556 qemu_system_debug_request();
Andreas Färberf324e762012-05-02 23:26:21 +0200557 cpu->stopped = true;
Jan Kiszka3c638d02010-06-25 16:56:56 +0200558}
559
Paolo Bonzini714bd042011-03-12 17:44:06 +0100560static void cpu_signal(int sig)
561{
Andreas Färber4917cf42013-05-27 05:17:50 +0200562 if (current_cpu) {
563 cpu_exit(current_cpu);
Paolo Bonzini714bd042011-03-12 17:44:06 +0100564 }
565 exit_request = 1;
566}
Paolo Bonzini714bd042011-03-12 17:44:06 +0100567
Jan Kiszka6d9cb732011-02-01 22:15:58 +0100568#ifdef CONFIG_LINUX
569static void sigbus_reraise(void)
570{
571 sigset_t set;
572 struct sigaction action;
573
574 memset(&action, 0, sizeof(action));
575 action.sa_handler = SIG_DFL;
576 if (!sigaction(SIGBUS, &action, NULL)) {
577 raise(SIGBUS);
578 sigemptyset(&set);
579 sigaddset(&set, SIGBUS);
580 sigprocmask(SIG_UNBLOCK, &set, NULL);
581 }
582 perror("Failed to re-raise SIGBUS!\n");
583 abort();
584}
585
586static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
587 void *ctx)
588{
589 if (kvm_on_sigbus(siginfo->ssi_code,
590 (void *)(intptr_t)siginfo->ssi_addr)) {
591 sigbus_reraise();
592 }
593}
594
595static void qemu_init_sigbus(void)
596{
597 struct sigaction action;
598
599 memset(&action, 0, sizeof(action));
600 action.sa_flags = SA_SIGINFO;
601 action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
602 sigaction(SIGBUS, &action, NULL);
603
604 prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
605}
606
/* Drain any pending SIG_IPI/SIGBUS directed at this vCPU thread without
 * blocking, routing SIGBUS machine-check info to KVM.  Loops until
 * neither signal remains pending.
 */
static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };   /* zero timeout: poll, don't block */
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            /* EAGAIN (nothing pending) and EINTR are expected; anything
             * else is fatal. */
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            /* SIG_IPI needs no action beyond being consumed.  */
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}
643
Jan Kiszka6d9cb732011-02-01 22:15:58 +0100644#else /* !CONFIG_LINUX */
645
/* Non-Linux hosts: no SIGBUS/machine-check plumbing; nothing to set up. */
static void qemu_init_sigbus(void)
{
}
Jan Kiszka1ab3c6c2011-03-15 12:26:12 +0100649
/* Non-Linux stub: no pending host signals to drain for @cpu. */
static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
Jan Kiszka6d9cb732011-02-01 22:15:58 +0100653#endif /* !CONFIG_LINUX */
654
Blue Swirl296af7c2010-03-29 19:23:50 +0000655#ifndef _WIN32
/* Intentionally empty handler, installed for SIG_IPI (see
 * qemu_kvm_init_cpu_signals) so delivery of the signal has no effect
 * beyond interrupting the vCPU thread.
 */
static void dummy_signal(int sig)
{
}
659
Andreas Färber13618e02013-05-26 23:41:00 +0200660static void qemu_kvm_init_cpu_signals(CPUState *cpu)
Paolo Bonzini714bd042011-03-12 17:44:06 +0100661{
662 int r;
663 sigset_t set;
664 struct sigaction sigact;
665
666 memset(&sigact, 0, sizeof(sigact));
667 sigact.sa_handler = dummy_signal;
668 sigaction(SIG_IPI, &sigact, NULL);
669
Paolo Bonzini714bd042011-03-12 17:44:06 +0100670 pthread_sigmask(SIG_BLOCK, NULL, &set);
671 sigdelset(&set, SIG_IPI);
672 sigdelset(&set, SIGBUS);
Andreas Färber491d6e82013-05-26 23:38:10 +0200673 r = kvm_set_signal_mask(cpu, &set);
Paolo Bonzini714bd042011-03-12 17:44:06 +0100674 if (r) {
675 fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
676 exit(1);
677 }
Paolo Bonzini714bd042011-03-12 17:44:06 +0100678}
679
680static void qemu_tcg_init_cpu_signals(void)
681{
Paolo Bonzini714bd042011-03-12 17:44:06 +0100682 sigset_t set;
683 struct sigaction sigact;
684
685 memset(&sigact, 0, sizeof(sigact));
686 sigact.sa_handler = cpu_signal;
687 sigaction(SIG_IPI, &sigact, NULL);
688
689 sigemptyset(&set);
690 sigaddset(&set, SIG_IPI);
691 pthread_sigmask(SIG_UNBLOCK, &set, NULL);
Paolo Bonzini714bd042011-03-12 17:44:06 +0100692}
693
Jan Kiszka55f8d6a2011-02-01 22:15:52 +0100694#else /* _WIN32 */
/* Win32 build: KVM is unavailable, so reaching this is a bug. */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}
699
/* Win32 build: TCG uses no POSIX signals; nothing to initialize. */
static void qemu_tcg_init_cpu_signals(void)
{
}
Jan Kiszka55f8d6a2011-02-01 22:15:52 +0100703#endif /* _WIN32 */
Blue Swirl296af7c2010-03-29 19:23:50 +0000704
Stefan Weilb2532d82012-09-27 07:41:42 +0200705static QemuMutex qemu_global_mutex;
Paolo Bonzini46daff12011-06-09 13:10:24 +0200706static QemuCond qemu_io_proceeded_cond;
707static bool iothread_requesting_mutex;
Blue Swirl296af7c2010-03-29 19:23:50 +0000708
709static QemuThread io_thread;
710
711static QemuThread *tcg_cpu_thread;
712static QemuCond *tcg_halt_cond;
713
Blue Swirl296af7c2010-03-29 19:23:50 +0000714/* cpu creation */
715static QemuCond qemu_cpu_cond;
716/* system init */
Blue Swirl296af7c2010-03-29 19:23:50 +0000717static QemuCond qemu_pause_cond;
Marcelo Tosattie82bcec2010-05-04 09:45:22 -0300718static QemuCond qemu_work_cond;
Blue Swirl296af7c2010-03-29 19:23:50 +0000719
Paolo Bonzinid3b12f52011-09-13 10:30:52 +0200720void qemu_init_cpu_loop(void)
Blue Swirl296af7c2010-03-29 19:23:50 +0000721{
Jan Kiszka6d9cb732011-02-01 22:15:58 +0100722 qemu_init_sigbus();
Anthony Liguoried945922011-02-08 18:18:18 +0100723 qemu_cond_init(&qemu_cpu_cond);
Anthony Liguoried945922011-02-08 18:18:18 +0100724 qemu_cond_init(&qemu_pause_cond);
725 qemu_cond_init(&qemu_work_cond);
Paolo Bonzini46daff12011-06-09 13:10:24 +0200726 qemu_cond_init(&qemu_io_proceeded_cond);
Blue Swirl296af7c2010-03-29 19:23:50 +0000727 qemu_mutex_init(&qemu_global_mutex);
Blue Swirl296af7c2010-03-29 19:23:50 +0000728
Jan Kiszkab7680cb2011-03-12 17:43:51 +0100729 qemu_thread_get_self(&io_thread);
Blue Swirl296af7c2010-03-29 19:23:50 +0000730}
731
Andreas Färberf100f0b2012-05-03 14:58:47 +0200732void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
Marcelo Tosattie82bcec2010-05-04 09:45:22 -0300733{
734 struct qemu_work_item wi;
735
Andreas Färber60e82572012-05-02 22:23:49 +0200736 if (qemu_cpu_is_self(cpu)) {
Marcelo Tosattie82bcec2010-05-04 09:45:22 -0300737 func(data);
738 return;
739 }
740
741 wi.func = func;
742 wi.data = data;
Chegu Vinod3c022702013-06-24 03:49:41 -0600743 wi.free = false;
Andreas Färberc64ca812012-05-03 02:11:45 +0200744 if (cpu->queued_work_first == NULL) {
745 cpu->queued_work_first = &wi;
Jan Kiszka0ab07c62011-02-07 12:19:14 +0100746 } else {
Andreas Färberc64ca812012-05-03 02:11:45 +0200747 cpu->queued_work_last->next = &wi;
Jan Kiszka0ab07c62011-02-07 12:19:14 +0100748 }
Andreas Färberc64ca812012-05-03 02:11:45 +0200749 cpu->queued_work_last = &wi;
Marcelo Tosattie82bcec2010-05-04 09:45:22 -0300750 wi.next = NULL;
751 wi.done = false;
752
Andreas Färberc08d7422012-05-03 04:34:15 +0200753 qemu_cpu_kick(cpu);
Marcelo Tosattie82bcec2010-05-04 09:45:22 -0300754 while (!wi.done) {
Andreas Färber4917cf42013-05-27 05:17:50 +0200755 CPUState *self_cpu = current_cpu;
Marcelo Tosattie82bcec2010-05-04 09:45:22 -0300756
757 qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
Andreas Färber4917cf42013-05-27 05:17:50 +0200758 current_cpu = self_cpu;
Marcelo Tosattie82bcec2010-05-04 09:45:22 -0300759 }
760}
761
Chegu Vinod3c022702013-06-24 03:49:41 -0600762void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
763{
764 struct qemu_work_item *wi;
765
766 if (qemu_cpu_is_self(cpu)) {
767 func(data);
768 return;
769 }
770
771 wi = g_malloc0(sizeof(struct qemu_work_item));
772 wi->func = func;
773 wi->data = data;
774 wi->free = true;
775 if (cpu->queued_work_first == NULL) {
776 cpu->queued_work_first = wi;
777 } else {
778 cpu->queued_work_last->next = wi;
779 }
780 cpu->queued_work_last = wi;
781 wi->next = NULL;
782 wi->done = false;
783
784 qemu_cpu_kick(cpu);
785}
786
/* Execute and retire every work item queued on @cpu by run_on_cpu() /
 * async_run_on_cpu().  Runs on @cpu's thread with the BQL held.
 */
static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        /* done = true releases a synchronous run_on_cpu() caller; only
         * async items (wi->free) are heap-allocated and freed here. */
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    /* Wake every thread sleeping in run_on_cpu(); each rechecks its item. */
    qemu_cond_broadcast(&qemu_work_cond);
}
806
/* Per-CPU housekeeping done by every vCPU loop after waking up (BQL held):
 * acknowledge a pending pause request, run queued work items, and re-arm
 * the kick mechanism.
 */
static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        /* Convert the request (stop) into the acknowledgment (stopped)
         * and notify pause_all_vcpus(). */
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    /* Allow the next qemu_cpu_kick() to deliver a signal again. */
    cpu->thread_kicked = false;
}
817
/* Idle-wait for the single TCG thread (BQL held).  Sleeps while all vCPUs
 * are idle, then lets the I/O thread finish with the BQL before doing the
 * common per-CPU housekeeping for every vCPU.
 */
static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    /* Give way to an I/O thread that is waiting for the BQL; it signals
     * qemu_io_proceeded_cond when it is done. */
    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    /* One TCG thread serves all vCPUs, so service each of them. */
    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}
837
/* Idle-wait for a KVM vCPU thread (BQL held): sleep on the per-CPU halt
 * condition while the vCPU is idle, then drain pending SIGBUS-style
 * signals and do the common housekeeping.
 */
static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}
847
/* Thread entry point for a KVM vCPU (one thread per vCPU).
 *
 * Takes the BQL, registers itself, creates the in-kernel vCPU, signals
 * qemu_cpu_cond so qemu_kvm_start_vcpu() can return, then loops forever:
 * run the guest while allowed, handle debug exits, and idle-wait otherwise.
 * The loop never exits; the trailing return only silences the compiler.
 */
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}
882
/* Thread entry point for a "dummy" vCPU used by qtest (no KVM, no TCG).
 *
 * The thread never executes guest code; it just parks in sigwait() for
 * SIG_IPI kicks (with the BQL dropped) and services pause/work requests
 * when woken.  Not available on Windows, which lacks sigwait().
 */
static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        /* Clear current_cpu while the BQL is dropped; restore it after
         * reacquiring the lock. */
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}
924
static void tcg_exec_all(void);

/* Entry point of the single shared TCG thread that runs ALL vCPUs
 * round-robin.
 *
 * After registering every CPU as created and signaling qemu_cpu_cond, it
 * waits for the machine to be started (first CPU unstopped), then loops:
 * execute all runnable vCPUs, fire expired icount deadlines, and idle-wait.
 * NOTE(review): the CPU_FOREACH below reuses and clobbers the 'cpu'
 * argument variable; that is harmless here since the argument is not
 * needed afterwards, but it is easy to misread.
 */
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (QTAILQ_FIRST(&cpus)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            /* Deadline already expired: make the timer subsystem run. */
            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
966
Andreas Färber2ff09a42012-05-03 00:23:30 +0200967static void qemu_cpu_kick_thread(CPUState *cpu)
Paolo Bonzinicc015e92011-03-12 17:44:08 +0100968{
969#ifndef _WIN32
970 int err;
971
Andreas Färber814e6122012-05-02 17:00:37 +0200972 err = pthread_kill(cpu->thread->thread, SIG_IPI);
Paolo Bonzinicc015e92011-03-12 17:44:08 +0100973 if (err) {
974 fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
975 exit(1);
976 }
977#else /* _WIN32 */
Andreas Färber60e82572012-05-02 22:23:49 +0200978 if (!qemu_cpu_is_self(cpu)) {
Olivier Hainqueed9164a2013-04-09 18:06:53 +0200979 CONTEXT tcgContext;
980
981 if (SuspendThread(cpu->hThread) == (DWORD)-1) {
Stefan Weil7f1721d2013-04-13 22:45:50 +0200982 fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
Olivier Hainqueed9164a2013-04-09 18:06:53 +0200983 GetLastError());
984 exit(1);
985 }
986
987 /* On multi-core systems, we are not sure that the thread is actually
988 * suspended until we can get the context.
989 */
990 tcgContext.ContextFlags = CONTEXT_CONTROL;
991 while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
992 continue;
993 }
994
Paolo Bonzinicc015e92011-03-12 17:44:08 +0100995 cpu_signal(0);
Olivier Hainqueed9164a2013-04-09 18:06:53 +0200996
997 if (ResumeThread(cpu->hThread) == (DWORD)-1) {
Stefan Weil7f1721d2013-04-13 22:45:50 +0200998 fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
Olivier Hainqueed9164a2013-04-09 18:06:53 +0200999 GetLastError());
1000 exit(1);
1001 }
Paolo Bonzinicc015e92011-03-12 17:44:08 +01001002 }
1003#endif
1004}
1005
Andreas Färberc08d7422012-05-03 04:34:15 +02001006void qemu_cpu_kick(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001007{
Andreas Färberf5c121b2012-05-03 01:22:49 +02001008 qemu_cond_broadcast(cpu->halt_cond);
Andreas Färber216fc9a2012-05-02 17:49:49 +02001009 if (!tcg_enabled() && !cpu->thread_kicked) {
Andreas Färber2ff09a42012-05-03 00:23:30 +02001010 qemu_cpu_kick_thread(cpu);
Andreas Färber216fc9a2012-05-02 17:49:49 +02001011 cpu->thread_kicked = true;
Jan Kiszkaaa2c3642011-02-01 22:15:42 +01001012 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001013}
1014
/* Kick the calling vCPU thread itself (POSIX only): arranges for the
 * pending SIG_IPI to interrupt guest execution once it resumes.  Must be
 * called from a vCPU thread; unreachable on Windows.
 */
void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);

    if (!current_cpu->thread_kicked) {
        qemu_cpu_kick_thread(current_cpu);
        current_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}
1028
/* Return true when the calling thread is @cpu's vCPU thread. */
bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}
1033
/* Return true when running on any vCPU thread (as opposed to the I/O
 * thread); false also during early startup before current_cpu is set. */
static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}
1038
/* Acquire the BQL from the I/O thread.
 *
 * With TCG the single vCPU thread can hold the lock for long stretches, so
 * announce the request via iothread_requesting_mutex, and if a trylock
 * fails, kick the TCG thread out of guest code before blocking on the
 * mutex.  The broadcast lets qemu_tcg_wait_io_event() proceed afterwards.
 */
void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}
1053
/* Release the BQL acquired with qemu_mutex_lock_iothread(). */
void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}
1058
1059static int all_vcpus_paused(void)
1060{
Andreas Färberbdc44642013-06-24 23:50:24 +02001061 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001062
Andreas Färberbdc44642013-06-24 23:50:24 +02001063 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001064 if (!cpu->stopped) {
Blue Swirl296af7c2010-03-29 19:23:50 +00001065 return 0;
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001066 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001067 }
1068
1069 return 1;
1070}
1071
1072void pause_all_vcpus(void)
1073{
Andreas Färberbdc44642013-06-24 23:50:24 +02001074 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001075
Alex Bligh40daca52013-08-21 16:03:02 +01001076 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
Andreas Färberbdc44642013-06-24 23:50:24 +02001077 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001078 cpu->stop = true;
1079 qemu_cpu_kick(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001080 }
1081
Juan Quintelaaa723c22012-09-18 16:30:11 +02001082 if (qemu_in_vcpu_thread()) {
Jan Kiszkad798e972012-02-17 18:31:16 +01001083 cpu_stop_current();
1084 if (!kvm_enabled()) {
Andreas Färberbdc44642013-06-24 23:50:24 +02001085 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001086 cpu->stop = false;
1087 cpu->stopped = true;
Jan Kiszkad798e972012-02-17 18:31:16 +01001088 }
1089 return;
1090 }
1091 }
1092
Blue Swirl296af7c2010-03-29 19:23:50 +00001093 while (!all_vcpus_paused()) {
Paolo Bonzinibe7d6c52011-03-12 17:44:02 +01001094 qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
Andreas Färberbdc44642013-06-24 23:50:24 +02001095 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001096 qemu_cpu_kick(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001097 }
1098 }
1099}
1100
Igor Mammedov29936832013-04-23 10:29:37 +02001101void cpu_resume(CPUState *cpu)
1102{
1103 cpu->stop = false;
1104 cpu->stopped = false;
1105 qemu_cpu_kick(cpu);
1106}
1107
Blue Swirl296af7c2010-03-29 19:23:50 +00001108void resume_all_vcpus(void)
1109{
Andreas Färberbdc44642013-06-24 23:50:24 +02001110 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001111
Alex Bligh40daca52013-08-21 16:03:02 +01001112 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
Andreas Färberbdc44642013-06-24 23:50:24 +02001113 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001114 cpu_resume(cpu);
Blue Swirl296af7c2010-03-29 19:23:50 +00001115 }
1116}
1117
/* For temporary buffers for forming a name */
#define VCPU_THREAD_NAME_SIZE 16

/* Attach @cpu to TCG execution.  All TCG vCPUs share ONE thread: the first
 * call creates it (and its halt condition) and waits for the thread to
 * report creation; later calls just reuse the shared thread/condition.
 * Called with the BQL held.
 */
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    tcg_cpu_address_space_init(cpu, cpu->as);

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                 cpu->cpu_index);
        qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
                           cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        /* Needed by qemu_cpu_kick_thread() for Suspend/ResumeThread. */
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}
1149
Andreas Färber48a106b2013-05-27 02:20:39 +02001150static void qemu_kvm_start_vcpu(CPUState *cpu)
Blue Swirl296af7c2010-03-29 19:23:50 +00001151{
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001152 char thread_name[VCPU_THREAD_NAME_SIZE];
1153
Andreas Färber814e6122012-05-02 17:00:37 +02001154 cpu->thread = g_malloc0(sizeof(QemuThread));
Andreas Färberf5c121b2012-05-03 01:22:49 +02001155 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1156 qemu_cond_init(cpu->halt_cond);
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001157 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
1158 cpu->cpu_index);
1159 qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
1160 cpu, QEMU_THREAD_JOINABLE);
Andreas Färber61a46212012-05-02 22:49:36 +02001161 while (!cpu->created) {
Paolo Bonzini18a85722011-03-12 17:44:03 +01001162 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
Jan Kiszka0ab07c62011-02-07 12:19:14 +01001163 }
Blue Swirl296af7c2010-03-29 19:23:50 +00001164}
1165
Andreas Färber10a90212013-05-27 02:24:35 +02001166static void qemu_dummy_start_vcpu(CPUState *cpu)
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001167{
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001168 char thread_name[VCPU_THREAD_NAME_SIZE];
1169
Andreas Färber814e6122012-05-02 17:00:37 +02001170 cpu->thread = g_malloc0(sizeof(QemuThread));
Andreas Färberf5c121b2012-05-03 01:22:49 +02001171 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1172 qemu_cond_init(cpu->halt_cond);
Dr. David Alan Gilbert49001162014-01-30 10:20:32 +00001173 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
1174 cpu->cpu_index);
1175 qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001176 QEMU_THREAD_JOINABLE);
Andreas Färber61a46212012-05-02 22:49:36 +02001177 while (!cpu->created) {
Anthony Liguoric7f0f3b2012-03-28 15:42:02 +02001178 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1179 }
1180}
1181
/* Bring up the execution thread for @cpu, dispatching on the configured
 * accelerator (KVM, TCG, or the qtest dummy).  The vCPU starts in the
 * stopped state and is released by resume_all_vcpus()/vm_start().
 */
void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}
1195
/* Stop the vCPU the caller is running on: mark it stopped (clearing any
 * pending request), force it out of the execution loop, and notify
 * pause_all_vcpus() waiters.  No-op when not on a vCPU thread.
 */
void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}
1205
/* Stop the VM, entering run state @state.
 *
 * From a vCPU thread the stop is only REQUESTED (the main loop performs
 * it) and 0 is returned; from the I/O thread do_vm_stop() runs directly
 * and its result is returned.
 */
int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}
1220
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03001221/* does a state transition even if the VM is already stopped,
1222 current state is forgotten forever */
Kevin Wolf56983462013-07-05 13:49:54 +02001223int vm_stop_force_state(RunState state)
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03001224{
1225 if (runstate_is_running()) {
Kevin Wolf56983462013-07-05 13:49:54 +02001226 return vm_stop(state);
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03001227 } else {
1228 runstate_set(state);
Kevin Wolf594a45c2013-07-18 14:52:19 +02001229 /* Make sure to return an error if the flush in a previous vm_stop()
1230 * failed. */
1231 return bdrv_flush_all();
Luiz Capitulino8a9236f2011-10-14 11:18:09 -03001232 }
1233}
1234
/* Execute guest code for one CPU via TCG and return cpu_exec()'s result.
 *
 * With icount enabled, budget the number of instructions to run from the
 * next QEMU_CLOCK_VIRTUAL deadline before executing (split into the 16-bit
 * low counter plus icount_extra), and fold any unexecuted budget back into
 * qemu_icount afterwards.
 */
static int tcg_cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int64_t deadline;
        int decr;
        /* Reclaim whatever budget is left over from the last run. */
        qemu_icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
        cpu->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        count = qemu_icount_round(deadline);
        qemu_icount += count;
        /* The low counter holds at most 0xffff; overflow goes to extra. */
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        cpu->icount_decr.u16.low = decr;
        cpu->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
        cpu->icount_decr.u32 = 0;
        cpu->icount_extra = 0;
    }
    return ret;
}
1284
/* One round-robin pass over all vCPUs on the shared TCG thread.
 *
 * Resumes from next_cpu (so a pass interrupted by exit_request continues
 * where it left off), runs each runnable CPU once, and stops early on a
 * debug exit or when a CPU has been asked to stop.
 */
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;
        CPUArchState *env = cpu->env_ptr;

        /* Single-stepping with NOTIMER keeps the virtual clock frozen. */
        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}
1314
1315void set_numa_modes(void)
1316{
Andreas Färber1b1ed8d2012-12-17 04:22:03 +01001317 CPUState *cpu;
Blue Swirl296af7c2010-03-29 19:23:50 +00001318 int i;
1319
Andreas Färberbdc44642013-06-24 23:50:24 +02001320 CPU_FOREACH(cpu) {
Blue Swirl296af7c2010-03-29 19:23:50 +00001321 for (i = 0; i < nb_numa_nodes; i++) {
Andreas Färber55e5c282012-12-17 06:18:02 +01001322 if (test_bit(cpu->cpu_index, node_cpumask[i])) {
Andreas Färber1b1ed8d2012-12-17 04:22:03 +01001323 cpu->numa_node = i;
Blue Swirl296af7c2010-03-29 19:23:50 +00001324 }
1325 }
1326 }
1327}
1328
/* Print the list of selectable CPU models to @f, when the target defines
 * a cpu_list() implementation; silently does nothing otherwise.
 */
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}
Luiz Capitulinode0b36b2011-09-21 16:38:35 -03001336
/* QMP 'query-cpus' handler: build a list of per-CPU info (index, current,
 * halted, thread id, and an architecture-specific program counter field
 * selected by the TARGET_* #ifdefs).  Caller owns the returned list.
 */
CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#endif

        /* Pull the latest register state from the accelerator. */
        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001393
1394void qmp_memsave(int64_t addr, int64_t size, const char *filename,
1395 bool has_cpu, int64_t cpu_index, Error **errp)
1396{
1397 FILE *f;
1398 uint32_t l;
Andreas Färber55e5c282012-12-17 06:18:02 +01001399 CPUState *cpu;
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001400 uint8_t buf[1024];
1401
1402 if (!has_cpu) {
1403 cpu_index = 0;
1404 }
1405
Andreas Färber151d1322013-02-15 15:41:49 +01001406 cpu = qemu_get_cpu(cpu_index);
1407 if (cpu == NULL) {
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001408 error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
1409 "a CPU number");
1410 return;
1411 }
1412
1413 f = fopen(filename, "wb");
1414 if (!f) {
Luiz Capitulino618da852013-06-07 14:35:06 -04001415 error_setg_file_open(errp, errno, filename);
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001416 return;
1417 }
1418
1419 while (size != 0) {
1420 l = sizeof(buf);
1421 if (l > size)
1422 l = size;
Aneesh Kumar K.V2f4d0f52013-10-01 21:49:30 +05301423 if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
1424 error_setg(errp, "Invalid addr 0x%016" PRIx64 "specified", addr);
1425 goto exit;
1426 }
Luiz Capitulino0cfd6a92011-11-22 16:32:37 -02001427 if (fwrite(buf, 1, l, f) != l) {
1428 error_set(errp, QERR_IO_ERROR);
1429 goto exit;
1430 }
1431 addr += l;
1432 size -= l;
1433 }
1434
1435exit:
1436 fclose(f);
1437}
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02001438
1439void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
1440 Error **errp)
1441{
1442 FILE *f;
1443 uint32_t l;
1444 uint8_t buf[1024];
1445
1446 f = fopen(filename, "wb");
1447 if (!f) {
Luiz Capitulino618da852013-06-07 14:35:06 -04001448 error_setg_file_open(errp, errno, filename);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02001449 return;
1450 }
1451
1452 while (size != 0) {
1453 l = sizeof(buf);
1454 if (l > size)
1455 l = size;
Stefan Weileb6282f2014-04-07 20:28:23 +02001456 cpu_physical_memory_read(addr, buf, l);
Luiz Capitulino6d3962b2011-11-22 17:26:46 -02001457 if (fwrite(buf, 1, l, f) != l) {
1458 error_set(errp, QERR_IO_ERROR);
1459 goto exit;
1460 }
1461 addr += l;
1462 size -= l;
1463 }
1464
1465exit:
1466 fclose(f);
1467}
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02001468
1469void qmp_inject_nmi(Error **errp)
1470{
1471#if defined(TARGET_I386)
Andreas Färber182735e2013-05-29 22:29:20 +02001472 CPUState *cs;
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02001473
Andreas Färberbdc44642013-06-24 23:50:24 +02001474 CPU_FOREACH(cs) {
Andreas Färber182735e2013-05-29 22:29:20 +02001475 X86CPU *cpu = X86_CPU(cs);
Andreas Färber182735e2013-05-29 22:29:20 +02001476
Chen Fan02e51482013-12-23 17:04:02 +08001477 if (!cpu->apic_state) {
Andreas Färber182735e2013-05-29 22:29:20 +02001478 cpu_interrupt(cs, CPU_INTERRUPT_NMI);
Jan Kiszka02c09192011-10-18 00:00:06 +08001479 } else {
Chen Fan02e51482013-12-23 17:04:02 +08001480 apic_deliver_nmi(cpu->apic_state);
Jan Kiszka02c09192011-10-18 00:00:06 +08001481 }
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02001482 }
Eugene (jno) Dvurechenski7f7f9752012-12-05 15:50:07 +01001483#elif defined(TARGET_S390X)
1484 CPUState *cs;
1485 S390CPU *cpu;
1486
Andreas Färberbdc44642013-06-24 23:50:24 +02001487 CPU_FOREACH(cs) {
Eugene (jno) Dvurechenski7f7f9752012-12-05 15:50:07 +01001488 cpu = S390_CPU(cs);
1489 if (cpu->env.cpu_num == monitor_get_cpu_index()) {
1490 if (s390_cpu_restart(S390_CPU(cs)) == -1) {
1491 error_set(errp, QERR_UNSUPPORTED);
1492 return;
1493 }
1494 break;
1495 }
1496 }
Luiz Capitulinoab49ab52011-11-23 12:55:53 -02001497#else
1498 error_set(errp, QERR_UNSUPPORTED);
1499#endif
1500}