/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

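/* Called after each TB from the main loop: if the guest's virtual clock has
 * run ahead of the host's real-time clock by more than VM_CLOCK_ADVANCE,
 * sleep the host until the two are back in line.
 */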
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

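/* Rate-limited warning when the guest is running behind real time: at most
 * one message every MAX_DELAY_PRINT_RATE ns, and MAX_NB_PRINTS messages in
 * total.
 */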
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
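/* The value returned by tcg_qemu_tb_exec() is a TB pointer with the exit
 * reason encoded in its low bits; the TB_EXIT_* bits are inspected here and
 * the guest PC is re-synchronized if the TB never started executing.
 */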
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

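/* Look the TB up in the physical-PC hash table, translating it now if no
 * cached translation exists; a hit is moved to the head of its hash bucket
 * and recorded in the per-CPU virtual-PC jump cache.
 */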
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

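/* Fast path: check the per-CPU virtual-PC jump cache first and only fall
 * back to tb_find_slow() on a miss.
 */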
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

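/* Clear any watchpoint-hit flags and give the registered debug exception
 * handler, if there is one, a chance to run.
 */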
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

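/* Global request to leave the execution loop; cpu_exec() samples it once on
 * entry and turns it into a per-CPU cpu->exit_request.
 */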
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

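            /* next_tb remembers the previously executed TB (plus its
             * TB_EXIT_* bits) so that tb_add_jump() can chain it directly to
             * the TB found below; it is reset to 0 whenever such chaining
             * would be unsafe.
             */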
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */
830
bellard7d132992003-03-06 23:23:54 +0000831
bellarde4533c72003-06-15 19:51:39 +0000832#if defined(TARGET_I386)
bellard9de5e442003-03-23 16:49:39 +0000833 /* restore flags in standard format */
Blue Swirle694d4e2011-05-16 19:38:48 +0000834 env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
liguang80cf2c82013-05-28 16:21:08 +0800835 | (env->df & DF_MASK);
bellarde4533c72003-06-15 19:51:39 +0000836#elif defined(TARGET_ARM)
bellardb7bcbe92005-02-22 19:27:29 +0000837 /* XXX: Save/restore host fpu exception state?. */
Guan Xuetaod2fbca92011-04-12 16:27:03 +0800838#elif defined(TARGET_UNICORE32)
bellard93ac68b2003-09-30 20:57:29 +0000839#elif defined(TARGET_SPARC)
bellard67867302003-11-23 17:05:30 +0000840#elif defined(TARGET_PPC)
Michael Walle81ea0e12011-02-17 23:45:02 +0100841#elif defined(TARGET_LM32)
pbrooke6e59062006-10-22 00:18:54 +0000842#elif defined(TARGET_M68K)
843 cpu_m68k_flush_flags(env, env->cc_op);
844 env->cc_op = CC_OP_FLAGS;
845 env->sr = (env->sr & 0xffe0)
846 | env->cc_dest | (env->cc_x << 4);
Edgar E. Iglesiasb779e292009-05-20 21:31:33 +0200847#elif defined(TARGET_MICROBLAZE)
bellard6af0bf92005-07-02 14:58:51 +0000848#elif defined(TARGET_MIPS)
Anthony Greend15a9c22013-03-18 15:49:25 -0400849#elif defined(TARGET_MOXIE)
Jia Liue67db062012-07-20 15:50:39 +0800850#elif defined(TARGET_OPENRISC)
bellardfdf9b3e2006-04-27 21:07:38 +0000851#elif defined(TARGET_SH4)
j_mayereddf68a2007-04-05 07:22:49 +0000852#elif defined(TARGET_ALPHA)
thsf1ccf902007-10-08 13:16:14 +0000853#elif defined(TARGET_CRIS)
Alexander Graf10ec5112009-12-05 12:44:21 +0100854#elif defined(TARGET_S390X)
Max Filippov23288262011-09-06 03:55:25 +0400855#elif defined(TARGET_XTENSA)
bellardfdf9b3e2006-04-27 21:07:38 +0000856 /* XXXXX */
bellarde4533c72003-06-15 19:51:39 +0000857#else
858#error unsupported target CPU
859#endif
pbrook1057eaa2007-02-04 13:37:44 +0000860
Andreas Färber4917cf42013-05-27 05:17:50 +0200861 /* fail safe : never use current_cpu outside cpu_exec() */
862 current_cpu = NULL;
bellard7d132992003-03-06 23:23:54 +0000863 return ret;
864}