/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

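/* A vCPU thread is idle when it has no stop request or queued work pending,
 * and it is either stopped or halted with no work to do (and, for KVM, halt
 * is not handled inside the kernel). */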
static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || qemu_cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

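/* True only if every vCPU thread is currently idle. */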
static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed. */
static int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;
static int64_t vm_clock_warp_start;
static int64_t qemu_icount;

typedef struct TimersState {
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

static TimersState timers_state;

/* Return the virtual CPU time, based on the instruction counter. */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = qemu_icount;
    if (cpu) {
        CPUArchState *env = cpu->env_ptr;
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

/* return the host CPU cycle counter and handle stop/restart */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non-increasing ticks may happen if the host uses
               software suspend. */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        return ti + timers_state.cpu_clock_offset;
    }
}

/* enable cpu_get_ticks() */
void cpu_enable_ticks(void)
{
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
}

/* disable cpu_get_ticks(): the clock is stopped. You must not call
   cpu_get_ticks() after that. */
void cpu_disable_ticks(void)
{
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock();
        timers_state.cpu_ticks_enabled = 0;
    }
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

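/* Periodically re-tune icount_time_shift so that the virtual clock keeps
 * pace with real time while the VM is running. */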
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;
    /* If the VM is not running, then do nothing. */
    if (!runstate_is_running()) {
        return;
    }
    cur_time = cpu_get_clock();
    cur_icount = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead. Slow time down. */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind. Speed time up. */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}

static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

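/* Fold the real time that passed while the vCPUs were asleep into the icount
 * bias, so that QEMU_CLOCK_VIRTUAL catches up with real time (capped in
 * adaptive mode). */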
static void icount_warp_rt(void *opaque)
{
    if (vm_clock_warp_start == -1) {
        return;
    }

    if (runstate_is_running()) {
        int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        int64_t warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 1) {
            qemu_icount_bias += warp_delta;
        } else {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock();
            int64_t cur_icount = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
            int64_t delta = cur_time - cur_icount;
            qemu_icount_bias += MIN(warp_delta, delta);
        }
        if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
        }
    }
    vm_clock_warp_start = -1;
}

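/* qtest only: advance QEMU_CLOCK_VIRTUAL directly to 'dest', running any
 * timers that expire along the way. */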
void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = MIN(dest - clock, deadline);
        qemu_icount_bias += warp;
        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}

void qemu_clock_warp(QEMUClockType type)
{
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks. But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
     * This ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest QEMU_CLOCK_VIRTUAL timer.
     */
    icount_warp_rt(NULL);
    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(QEMU_CLOCK_VIRTUAL)) {
        timer_del(icount_warp_timer);
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount. */
        return;
    }

    vm_clock_warp_start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    /* We want to use the earliest deadline from ALL vm_clocks */
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

    /* Maintain prior (possibly buggy) behaviour where if no deadline
     * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
     * INT32_MAX nanoseconds ahead, we still use INT32_MAX
     * nanoseconds.
     */
    if ((deadline < 0) || (deadline > INT32_MAX)) {
        deadline = INT32_MAX;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep. Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL
         * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL
         * event. Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL
         * after some "real" time (related to the time left until the next
         * event) has passed. The QEMU_CLOCK_REALTIME timer will do this.
         * This avoids making the warps visible externally; for example,
         * you will not be sending network packets continuously instead of
         * every 100ms.
         */
        timer_mod(icount_warp_timer, vm_clock_warp_start + deadline);
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};

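/* Parse the -icount option: a numeric argument fixes the instructions-per-tick
 * shift, while "auto" enables adaptive tuning driven by the realtime and
 * virtual time adjustment timers below. */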
void configure_icount(const char *option)
{
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option) {
        return;
    }

    icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
                                     icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway. */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers. */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
                                   icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
}

/***********************************************************/
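/* Report a fatal hardware emulation error: dump the state of every vCPU
 * and abort. */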
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

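/* If the VM is running, pause the vCPUs, switch the run state and notify
 * listeners. Block devices are drained and flushed in any case; the flush
 * result is returned so callers can report I/O errors. */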
static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        monitor_protocol_event(QEVENT_STOP, NULL);
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

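/* A vCPU may run guest code only if no stop has been requested and it is not
 * already stopped. */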
static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

static void cpu_signal(int sig)
{
    if (current_cpu) {
        cpu_exit(current_cpu);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
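/* Restore the default SIGBUS handler and re-raise the signal, so that the
 * process terminates with the original signal status. */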
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

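/* Synchronously drain pending SIG_IPI/SIGBUS signals for this vCPU thread,
 * forwarding SIGBUS machine-check reports to KVM. */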
static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

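/* The global mutex (the "iothread" lock) serializes the vCPU threads and the
 * main loop; the condition variables below coordinate CPU creation, pausing
 * and queued work. */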
static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

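/* Run func(data) on the given vCPU's thread and wait for it to complete.
 * When called from that vCPU's own thread the function runs immediately. */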
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}

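/* Queue func(data) on the given vCPU without waiting for it; the
 * heap-allocated work item is freed once it has run. */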
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}

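/* Run and retire every work item queued on this vCPU, then wake up any
 * thread blocked in run_on_cpu(). */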
static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}

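/* TCG: while every vCPU is idle, account the wait to the virtual clock and
 * sleep; once woken, wait for the iothread to release the lock and handle
 * per-CPU events. */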
static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle. */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}

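/* Main loop of a KVM vCPU thread: create the vCPU in the kernel, set up its
 * signal mask, then alternate between running guest code and servicing I/O
 * events. */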
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}

static void tcg_exec_all(void);

static void tcg_signal_cpu_creation(CPUState *cpu, void *data)
{
    cpu->thread_id = qemu_get_thread_id();
    cpu->created = true;
}

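/* The single TCG thread runs all vCPUs round-robin. It waits for the initial
 * kick-off after machine start, then repeatedly executes every runnable vCPU
 * and sleeps when all of them are idle. */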
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_for_each_cpu(tcg_signal_cpu_creation, NULL);
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (QTAILQ_FIRST(&cpus)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

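/* Kick a vCPU thread out of its execution loop: SIG_IPI on POSIX hosts; on
 * Windows, suspend the TCG thread, deliver the signal and resume it. */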
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);

    if (!current_cpu->thread_kicked) {
        qemu_cpu_kick_thread(current_cpu);
        current_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return 0;
        }
    }

    return 1;
}

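/* Ask every vCPU to stop and wait until all of them have actually paused. */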
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}

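/* TCG vCPUs share one host thread: create it for the first vCPU and reuse the
 * thread and halt condition for every vCPU created afterwards. */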
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, cpu,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}

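/* Run guest code on one vCPU under TCG. With -icount, first compute how many
 * instructions may execute before the next virtual timer deadline, and fold
 * the unexecuted remainder back into the counter afterwards. */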
static int tcg_cpu_exec(CPUArchState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int64_t deadline;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        count = qemu_icount_round(deadline);
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag. */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

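/* Run each runnable vCPU in turn until an exit is requested or a vCPU asks to
 * stop; the virtual clock is disabled while single-stepping with NOTIMER. */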
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;
        CPUArchState *env = cpu->env_ptr;

        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}

void set_numa_modes(void)
{
    CPUState *cpu;
    int i;

    CPU_FOREACH(cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (test_bit(cpu->cpu_index, node_cpumask[i])) {
                cpu->numa_node = i;
            }
        }
    }
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}

CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}

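/* QMP 'memsave': write a range of the given vCPU's virtual address space to
 * a file. */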
void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                  "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        cpu_memory_rw_debug(cpu, addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

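/* QMP 'pmemsave': write a range of guest physical memory to a file. */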
void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        cpu_physical_memory_rw(addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);
        CPUX86State *env = &cpu->env;

        if (!env->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(env->apic_state);
        }
    }
#elif defined(TARGET_S390X)
    CPUState *cs;
    S390CPU *cpu;

    CPU_FOREACH(cs) {
        cpu = S390_CPU(cs);
        if (cpu->env.cpu_num == monitor_get_cpu_index()) {
            if (s390_cpu_restart(S390_CPU(cs)) == -1) {
                error_set(errp, QERR_UNSUPPORTED);
                return;
            }
            break;
        }
    }
#else
    error_set(errp, QERR_UNSUPPORTED);
#endif
}