/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || qemu_cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed. */
static int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;
static int64_t vm_clock_warp_start;
static int64_t qemu_icount;

typedef struct TimersState {
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

static TimersState timers_state;

/* Return the virtual CPU time, based on the instruction counter. */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = qemu_icount;
    if (cpu) {
        CPUArchState *env = cpu->env_ptr;
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

/* return the host CPU cycle counter and handle stop/restart */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non increasing ticks may happen if the host uses
               software suspend */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        return ti + timers_state.cpu_clock_offset;
    }
}

/* enable cpu_get_ticks() */
void cpu_enable_ticks(void)
{
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
   cpu_get_ticks() after that. */
void cpu_disable_ticks(void)
{
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock();
        timers_state.cpu_ticks_enabled = 0;
    }
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

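/* Feedback loop: compare the virtual clock against the host clock and nudge
   icount_time_shift so that emulated time tracks real time.  Called
   periodically from the rt_clock and vm_clock adjustment timers below. */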
static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;
    /* If the VM is not running, then do nothing. */
    if (!runstate_is_running()) {
        return;
    }
    cur_time = cpu_get_clock();
    cur_icount = qemu_get_clock_ns(vm_clock);
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead. Slow time down. */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind. Speed time up. */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}

static void icount_adjust_rt(void *opaque)
{
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

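/* Timer callback that folds the real time that has elapsed since
   vm_clock_warp_start into qemu_icount_bias, so the virtual clock keeps
   advancing while all VCPUs are idle. */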
static void icount_warp_rt(void *opaque)
{
    if (vm_clock_warp_start == -1) {
        return;
    }

    if (runstate_is_running()) {
        int64_t clock = qemu_get_clock_ns(rt_clock);
        int64_t warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 1) {
            qemu_icount_bias += warp_delta;
        } else {
            /*
             * In adaptive mode, do not let the vm_clock run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock();
            int64_t cur_icount = qemu_get_clock_ns(vm_clock);
            int64_t delta = cur_time - cur_icount;
            qemu_icount_bias += MIN(warp_delta, delta);
        }
        if (qemu_clock_expired(vm_clock)) {
            qemu_clock_notify(vm_clock);
        }
    }
    vm_clock_warp_start = -1;
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_get_clock_ns(vm_clock);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(vm_clock);
        int64_t warp = MIN(dest - clock, deadline);
        qemu_icount_bias += warp;
        qemu_run_timers(vm_clock);
        clock = qemu_get_clock_ns(vm_clock);
    }
    qemu_clock_notify(vm_clock);
}

void qemu_clock_warp(QEMUClock *clock)
{
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (clock != vm_clock || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance the vm_clock timer now.  This
     * ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest vm_clock timer.
     */
    icount_warp_rt(NULL);
    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(vm_clock)) {
        qemu_del_timer(icount_warp_timer);
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount. */
        return;
    }

    vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
    /* We want to use the earliest deadline from ALL vm_clocks */
    deadline = qemu_clock_deadline_ns_all(vm_clock);

    /* Maintain prior (possibly buggy) behaviour where if no deadline
     * was set (as there is no vm_clock timer) or it is more than
     * INT32_MAX nanoseconds ahead, we still use INT32_MAX
     * nanoseconds.
     */
    if ((deadline < 0) || (deadline > INT32_MAX)) {
        deadline = INT32_MAX;
    }

    if (deadline > 0) {
        /*
         * Ensure the vm_clock proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * vm_clock.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending vm_clock timer; rather
         * time could just advance to the next vm_clock event.  Instead, we
         * do stop VCPUs and only advance vm_clock after some "real" time
         * (related to the time left until the next event) has passed.  The
         * rt_clock timer armed below does this.  This avoids making the
         * warps too visible externally; for example, you will not be
         * sending network packets continuously instead of every 100ms.
         */
        qemu_mod_timer(icount_warp_timer, vm_clock_warp_start + deadline);
    } else if (deadline == 0) {
        qemu_clock_notify(vm_clock);
    }
}

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};

void configure_icount(const char *option)
{
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option) {
        return;
    }

    icount_warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway. */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers. */
    icount_rt_timer = qemu_new_timer_ms(rt_clock, icount_adjust_rt, NULL);
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_vm_timer = qemu_new_timer_ns(vm_clock, icount_adjust_vm, NULL);
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

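/* Stop the VM, notify listeners, and flush block devices; returns the result
   of bdrv_flush_all() so callers can report flush failures. */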
static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        monitor_protocol_event(QEVENT_STOP, NULL);
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

static void cpu_signal(int sig)
{
    if (current_cpu) {
        cpu_exit(current_cpu);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

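/* Run func(data) on the given CPU's thread and wait for it to complete.
   If called from that CPU's own thread, the function runs directly. */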
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}

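/* Queue func(data) on the given CPU's thread without waiting; the work item
   is heap-allocated and freed later by flush_queued_work(). */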
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}

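/* Drain a CPU's queued work items, then wake any run_on_cpu() callers
   blocked on qemu_work_cond. */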
static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

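/* Housekeeping shared by all VCPU loops: complete a pending stop request,
   run queued work, and clear the thread-kicked flag. */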
static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle. */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}

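/* Per-VCPU thread body for KVM: create the vcpu in the kernel, set up
   signals, then loop between kvm_cpu_exec() and waiting for I/O events. */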
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}

static void tcg_exec_all(void);

static void tcg_signal_cpu_creation(CPUState *cpu, void *data)
{
    cpu->thread_id = qemu_get_thread_id();
    cpu->created = true;
}

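/* Single TCG thread body: all emulated CPUs are executed round-robin from
   this one thread (see tcg_exec_all), interleaved with I/O event waits. */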
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_for_each_cpu(tcg_signal_cpu_creation, NULL);
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(vm_clock);

            if (deadline == 0) {
                qemu_clock_notify(vm_clock);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

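/* Interrupt a VCPU thread: SIG_IPI on POSIX hosts; on Windows, suspend the
   TCG thread, raise the exit request via cpu_signal() and resume it. */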
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);

    if (!current_cpu->thread_kicked) {
        qemu_cpu_kick_thread(current_cpu);
        current_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

static bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

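/* Acquire the global ("iothread") mutex.  With TCG the VCPU thread may hold
   it for long stretches, so kick the CPU and flag that the iothread wants
   the lock instead of simply blocking. */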
void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *cpu = first_cpu;

    while (cpu) {
        if (!cpu->stopped) {
            return 0;
        }
        cpu = cpu->next_cpu;
    }

    return 1;
}

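/* Ask every VCPU to stop and wait until all of them have paused.  When
   called from a VCPU thread and KVM is not in use, the CPUs are marked
   stopped directly instead of waiting. */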
void pause_all_vcpus(void)
{
    CPUState *cpu = first_cpu;

    qemu_clock_enable(vm_clock, false);
    while (cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
        cpu = cpu->next_cpu;
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            cpu = first_cpu;
            while (cpu) {
                cpu->stop = false;
                cpu->stopped = true;
                cpu = cpu->next_cpu;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        cpu = first_cpu;
        while (cpu) {
            qemu_cpu_kick(cpu);
            cpu = cpu->next_cpu;
        }
    }
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu = first_cpu;

    qemu_clock_enable(vm_clock, true);
    while (cpu) {
        cpu_resume(cpu);
        cpu = cpu->next_cpu;
    }
}

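/* TCG runs every emulated CPU on one shared thread: create that thread and
   its halt condition on first use, and reuse them for subsequent CPUs. */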
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, cpu,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}

Blue Swirl296af7c2010-03-29 19:23:50 +00001153{
1154 int ret;
1155#ifdef CONFIG_PROFILER
1156 int64_t ti;
1157#endif
1158
1159#ifdef CONFIG_PROFILER
1160 ti = profile_getclock();
1161#endif
1162 if (use_icount) {
1163 int64_t count;
Alex Blighac70aaf2013-08-21 16:02:57 +01001164 int64_t deadline;
Blue Swirl296af7c2010-03-29 19:23:50 +00001165 int decr;
1166 qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
1167 env->icount_decr.u16.low = 0;
1168 env->icount_extra = 0;
Alex Blighac70aaf2013-08-21 16:02:57 +01001169 deadline = qemu_clock_deadline_ns_all(vm_clock);
1170
1171 /* Maintain prior (possibly buggy) behaviour where if no deadline
1172 * was set (as there is no vm_clock timer) or it is more than
1173 * INT32_MAX nanoseconds ahead, we still use INT32_MAX
1174 * nanoseconds.
1175 */
1176 if ((deadline < 0) || (deadline > INT32_MAX)) {
1177 deadline = INT32_MAX;
1178 }
1179
1180 count = qemu_icount_round(deadline);
Blue Swirl296af7c2010-03-29 19:23:50 +00001181 qemu_icount += count;
1182 decr = (count > 0xffff) ? 0xffff : count;
1183 count -= decr;
1184 env->icount_decr.u16.low = decr;
1185 env->icount_extra = count;
1186 }
1187 ret = cpu_exec(env);
1188#ifdef CONFIG_PROFILER
1189 qemu_time += profile_getclock() - ti;
1190#endif
1191 if (use_icount) {
1192 /* Fold pending instructions back into the
1193 instruction counter, and clear the interrupt flag. */
1194 qemu_icount -= (env->icount_decr.u16.low
1195 + env->icount_extra);
1196 env->icount_decr.u32 = 0;
1197 env->icount_extra = 0;
1198 }
1199 return ret;
1200}
1201
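/* Round-robin scheduler for the single TCG thread: run each CPU in turn
   until an exit is requested, a CPU stops, or a debug exception occurs. */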
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock. */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *cpu = next_cpu;
        CPUArchState *env = cpu->env_ptr;

        qemu_clock_enable(vm_clock,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}

void set_numa_modes(void)
{
    CPUState *cpu;
    int i;

    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (test_bit(cpu->cpu_index, node_cpumask[i])) {
                cpu->numa_node = i;
            }
        }
    }
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}

CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}

void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                  "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        cpu_memory_rw_debug(cpu, addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        cpu_physical_memory_rw(addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
        X86CPU *cpu = X86_CPU(cs);
        CPUX86State *env = &cpu->env;

        if (!env->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(env->apic_state);
        }
    }
#else
    error_set(errp, QERR_UNSUPPORTED);
#endif
}