/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to get at most 3 ms ahead of the host clock.
 * The difference between the two clocks can therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
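
/* In outline, the -icount alignment works as follows: diff_clk tracks how
 * far the guest's virtual clock has run ahead of the host's real-time
 * clock, in nanoseconds.  init_delay_params() seeds it from the two
 * clocks, and align_clocks() adds the time corresponding to the guest
 * instructions executed since its previous call.  Whenever the guest gets
 * more than VM_CLOCK_ADVANCE ahead, the host sleeps so real time can
 * catch up; print_delay() reports the opposite case, where the guest
 * cannot keep up and falls behind real time.
 */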

static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;

    /* Print at most every 2s if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

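/* A note on the value returned by cpu_tb_exec() below: the low bits
 * (TB_EXIT_MASK) encode why the generated code came back to the main
 * loop, while the remaining bits are a pointer to the TB that was last
 * executing.  The main loop uses a value of 0 to mean "no previous TB",
 * i.e. do not try to chain the next TB to anything.
 */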
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping. */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

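/* cpu_exec_nocache() below is only reached from the icount handling in
 * the main loop: when the instruction budget runs out in the middle of a
 * cached TB, a throwaway TB capped at the remaining instruction count is
 * generated, run once and freed again.
 */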
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

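/* TB lookup is two-level: tb_find_fast() first probes the CPU's
 * direct-mapped tb_jmp_cache, indexed by a hash of the virtual PC;
 * tb_find_slow() then walks the physical-address hash chain and, if the
 * block has not been translated yet, calls the translator.
 */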
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

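    /* In outline: the outer for(;;) loop below re-enters execution after
     * each exception or longjmp() back to the sigsetjmp() point; the
     * inner for(;;) loop services pending interrupt requests, finds (or
     * translates) the next TB, chains it to the previous one when
     * possible, and runs it via cpu_tb_exec().
     */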
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC. On real hardware the load causes the
                       return to occur. The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels. */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point. Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes cpu->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}