/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"

/* -icount align implementation. */

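/* diff_clk tracks how far the guest clock has run ahead of the host
 * clock, in nanoseconds (negative when the guest lags behind);
 * last_cpu_icount is the icount value sampled at the previous alignment
 * point, so the delta gives the instructions executed since then.
 */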
typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000

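/* Convert the instructions executed since the last call into guest
 * nanoseconds and, if the guest clock is now more than VM_CLOCK_ADVANCE
 * ahead of the host clock, sleep until the two are level again.
 * Only active when -icount align was requested.
 */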
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

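/* Record the initial guest/host clock difference so that align_clocks()
 * only has to account for time that elapses while we execute TBs.
 */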
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG USER ONLY */

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

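    /* tcg_qemu_tb_exec() returns the address of the last TB that was
     * executed, with the exit reason encoded in the low TB_EXIT_MASK bits.
     */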
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

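/* Look the TB up in the physical-address hash table, checking the second
 * page as well when a block spans a page boundary; translate a new TB if
 * none matches the current pc/cs_base/flags.
 */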
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

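/* Fast path: look the TB up in the per-CPU tb_jmp_cache, which is indexed
 * by the virtual PC; fall back to tb_find_slow() on a miss.
 */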
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

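/* Called when the main loop exits with EXCP_DEBUG: if no watchpoint was
 * actually hit, clear stale BP_WATCHPOINT_HIT flags, then invoke the
 * registered debug exception handler, if any.
 */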
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

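/* Set by other threads to ask the CPU currently running in cpu_exec() to
 * leave the execution loop (see the comment before smp_mb() below).
 */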
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

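    /* Per-target setup: bring any lazily maintained CPU flags into the
     * canonical form the generated code expects (most targets need
     * nothing here).
     */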
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

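            /* Inner loop: service pending interrupts, look up (or
             * translate) the next TB, chain it to the previous one when
             * possible, and execute it.
             */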
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
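                /* tb_lock protects the TB hash tables and the chaining of
                 * TBs while we look one up and patch the jump from the
                 * previous block.
                 */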
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is ahead */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}