/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif

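/* Note on cpu_tb_exec() below: tcg_qemu_tb_exec() returns the address of
 * the TB we exited from, with the low TB_EXIT_MASK bits encoding why the
 * generated code handed control back (which goto_tb jump slot was taken,
 * or one of the TB_EXIT_* request codes handled below).
 */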
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    tcg_target_ulong next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        cpu_pc_from_tb(env, tb);
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

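/* TB lookup: tb_find_fast() first checks the per-CPU virtual-PC hash cache
 * (env->tb_jmp_cache); on a miss, tb_find_slow() searches the physical hash
 * table and, if nothing suitable exists yet, translates a new block with
 * tb_gen_code().
 */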
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

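/* Set from outside the execution loop (e.g. by a signal handler or the I/O
 * thread) to ask the CPU to stop; cpu_exec() turns it into the per-CPU
 * exit_request so that the loop exits with EXCP_INTERRUPT.
 */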
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
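            /* Inner loop: service any pending interrupt or exit request,
             * look up (or translate) the TB for the current guest state,
             * chain it to the previously executed TB when possible, then
             * run it until something forces us back out.
             */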
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC. On real hardware the load causes the
                       return to occur. The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels. */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point. Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, CPU_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}