/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

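/* Sleep the host when the guest (virtual) clock has run ahead of the
 * host real-time clock by more than VM_CLOCK_ADVANCE ns; diff_clk
 * tracks the accumulated difference as icount advances. */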
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

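/* Warn, at most MAX_NB_PRINTS times and at most once every
 * MAX_DELAY_PRINT_RATE ns, when the guest has fallen noticeably
 * behind the host clock. */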
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

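/* Record the initial guest/host clock difference and the starting
 * icount so that later calls to align_clocks() can measure how far
 * the guest drifts; also update the max_delay/max_advance bounds. */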
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
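/* tcg_qemu_tb_exec() returns a pointer to the last TranslationBlock it
 * ran, with the low TB_EXIT_MASK bits encoding why execution stopped
 * (which chained-jump slot was taken, or a forced exit). */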
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
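/* Called from the TB_EXIT_ICOUNT_EXPIRED path below: a throw-away TB
 * limited to max_cycles instructions is generated, executed once, and
 * immediately invalidated and freed. */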
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

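/* Look the TB up in the physical-PC hash table, translating a new one
 * with tb_gen_code() if nothing matches; the result is also entered
 * into the per-CPU virtual-PC cache used by tb_find_fast(). */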
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

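/* Fast path: check the one-entry-per-hash virtual-PC cache
 * (tb_jmp_cache) first and fall back to tb_find_slow() on a miss. */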
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

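/* Optional hook, registered via cpu_set_debug_excp_handler(), that is
 * run when a debug exception is handled (e.g. by a target's breakpoint
 * or watchpoint support). */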
static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

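/* Clear any watchpoint-hit flags and give the registered handler a
 * chance to run before EXCP_DEBUG is returned to the caller of
 * cpu_exec(). */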
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

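/* cpu_exec() structure: an outer for(;;) whose sigsetjmp() handles
 * pending exceptions and longjmp-style exits, and an inner for(;;)
 * that services interrupt requests, looks up (or translates) the next
 * TB and runs it via cpu_tb_exec() until something forces an exit. */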
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
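                    /* Target-specific interrupt delivery: each guest
                     * architecture below checks its own enable bits and,
                     * when an interrupt can be taken, invokes the CPU
                     * class do_interrupt hook (or a helper) and forces
                     * next_tb = 0 so chained-TB patching is skipped. */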
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
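                /* tb_lock serializes TB lookup/generation and the
                 * direct-jump patching done by tb_add_jump() below. */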
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}