blob: d5b86d092f8ad6766c50c3941a6464e9fe8473fa [file] [log] [blame]
bellard7d132992003-03-06 23:23:54 +00001/*
陳韋任e965fc32012-02-06 14:02:55 +08002 * emulator main execution loop
ths5fafdf22007-09-16 21:08:06 +00003 *
bellard66321a12005-04-06 20:47:48 +00004 * Copyright (c) 2003-2005 Fabrice Bellard
bellard7d132992003-03-06 23:23:54 +00005 *
bellard3ef693a2003-03-23 20:17:16 +00006 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
bellard7d132992003-03-06 23:23:54 +000010 *
bellard3ef693a2003-03-23 20:17:16 +000011 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
bellard7d132992003-03-06 23:23:54 +000015 *
bellard3ef693a2003-03-23 20:17:16 +000016 * You should have received a copy of the GNU Lesser General Public
Blue Swirl8167ee82009-07-16 20:47:01 +000017 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
bellard7d132992003-03-06 23:23:54 +000018 */
bellarde4533c72003-06-15 19:51:39 +000019#include "config.h"
Blue Swirlcea5f9a2011-05-15 16:03:25 +000020#include "cpu.h"
Alex Bennée6db8b532014-08-01 17:08:57 +010021#include "trace.h"
Paolo Bonzini76cad712012-10-24 11:12:21 +020022#include "disas/disas.h"
bellard7cb69ca2008-05-10 10:55:51 +000023#include "tcg.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010024#include "qemu/atomic.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010025#include "sysemu/qtest.h"
Sebastian Tanasec2aa5f82014-07-25 11:56:31 +020026#include "qemu/timer.h"
27
28/* -icount align implementation. */
29
/* Bookkeeping for the -icount align option: tracks the drift between
 * the guest (virtual) clock and the host realtime clock so that the
 * execution loop can sleep to keep the two in step.
 */
typedef struct SyncClocks {
    int64_t diff_clk;        /* ns the guest is ahead (+) or behind (-) the host */
    int64_t last_cpu_icount; /* icount value observed at the previous alignment */
    int64_t realtime_clock;  /* QEMU_CLOCK_REALTIME snapshot taken at init, in ns */
} SyncClocks;
35
36#if !defined(CONFIG_USER_ONLY)
37/* Allow the guest to have a max 3ms advance.
38 * The difference between the 2 clocks could therefore
39 * oscillate around 0.
40 */
41#define VM_CLOCK_ADVANCE 3000000
Sebastian Tanase7f7bc142014-07-25 11:56:32 +020042#define THRESHOLD_REDUCE 1.5
43#define MAX_DELAY_PRINT_RATE 2000000000LL
44#define MAX_NB_PRINTS 100
Sebastian Tanasec2aa5f82014-07-25 11:56:31 +020045
/* Account for the instructions executed since the last call and, if the
 * guest clock has run more than VM_CLOCK_ADVANCE ahead of the host,
 * sleep until the two are aligned again.  No-op unless -icount align
 * was given on the command line.
 */
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    /* Convert the instructions retired since the previous alignment
     * into ns of virtual time and fold them into the clock delta. */
    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            /* Interrupted by a signal: subtract only the time that was
             * actually slept (requested minus remaining). */
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        /* Windows Sleep() takes ms and cannot report early wakeup. */
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
75
/* Warn the user when the guest falls noticeably behind real time.
 * Messages are rate-limited to one per MAX_DELAY_PRINT_RATE ns and to
 * MAX_NB_PRINTS in total; the moving threshold_delay window means a new
 * message is only printed when the delay grows past the last reported
 * second, or shrinks by more than THRESHOLD_REDUCE seconds.
 */
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;       /* upper bound (s) of last printed window */
    static int64_t last_realtime_clock; /* realtime of last printed message */
    static int nb_prints;               /* total messages printed so far */

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            /* Integer division deliberately truncates: the window is
             * [threshold_delay - 1, threshold_delay] whole seconds. */
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}
97
/* Initialise the -icount align bookkeeping at the start of cpu_exec():
 * snapshot the host realtime clock, compute the initial guest/host
 * clock difference, update the global max_delay/max_advance statistics
 * and emit a rate-limited warning if the guest is already late.
 * No-op unless -icount align was given.
 */
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    /* Initial delta = virtual clock - realtime clock, corrected by the
     * offset accumulated while the VM was stopped. */
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    /* Track the extremes for end-of-run reporting (globals defined
     * elsewhere): most-negative delta seen ... */
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    /* ... and most-positive delta seen. */
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to NB_PRINT_MAX(currently 100) */
    print_delay(sc);
}
120#else
/* User-mode emulation: -icount align is not supported, so clock
 * alignment and its initialisation are no-ops. */
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
128#endif /* CONFIG USER ONLY */
bellard7d132992003-03-06 23:23:54 +0000129
/* Abandon execution of the current TB and return to the main execution
 * loop via the sigsetjmp buffer armed in cpu_exec().  Does not return. */
void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;      /* we are no longer inside any TB */
    siglongjmp(cpu->jmp_env, 1);
}
thsbfed01f2007-06-03 17:44:37 +0000135
bellardfbf9eeb2004-04-25 21:21:33 +0000136/* exit the current TB from a signal handler. The host registers are
137 restored in a state compatible with the CPU emulator
138 */
Blue Swirl9eff14f2011-05-21 08:42:35 +0000139#if defined(CONFIG_SOFTMMU)
/* Re-enter the main execution loop from a signal handler.  Any pending
 * exception is discarded; the longjmp lands in cpu_exec()'s sigsetjmp.
 * The 'puc' signal context is currently unused here. */
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
Blue Swirl9eff14f2011-05-21 08:42:35 +0000147#endif
bellardfbf9eeb2004-04-25 21:21:33 +0000148
/* Execute a TB, and fix up the CPU state afterwards if necessary.
 *
 * The value returned by the generated code encodes the last TB pointer
 * in its upper bits and an exit reason in the low TB_EXIT_MASK bits;
 * the raw value is passed back to the caller for chaining decisions.
 */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping. */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    /* Run the generated host code for this TB. */
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            /* Target knows how to recover full state from the TB. */
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt. We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
197
pbrook2e70f6e2008-06-29 01:03:05 +0000198/* Execute the code without caching the generated code. An interpreter
199 could be used if available. */
Andreas Färber9349b4f2012-03-14 01:38:32 +0100200static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
Blue Swirlcea5f9a2011-05-15 16:03:25 +0000201 TranslationBlock *orig_tb)
pbrook2e70f6e2008-06-29 01:03:05 +0000202{
Andreas Färberd77953b2013-01-16 19:29:31 +0100203 CPUState *cpu = ENV_GET_CPU(env);
pbrook2e70f6e2008-06-29 01:03:05 +0000204 TranslationBlock *tb;
205
206 /* Should never happen.
207 We only end up here when an existing TB is too long. */
208 if (max_cycles > CF_COUNT_MASK)
209 max_cycles = CF_COUNT_MASK;
210
Andreas Färber648f0342013-09-01 17:43:17 +0200211 tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
pbrook2e70f6e2008-06-29 01:03:05 +0000212 max_cycles);
Andreas Färberd77953b2013-01-16 19:29:31 +0100213 cpu->current_tb = tb;
pbrook2e70f6e2008-06-29 01:03:05 +0000214 /* execute the generated code */
Alex Bennée6db8b532014-08-01 17:08:57 +0100215 trace_exec_tb_nocache(tb, tb->pc);
Peter Maydell77211372013-02-22 18:10:02 +0000216 cpu_tb_exec(cpu, tb->tc_ptr);
Andreas Färberd77953b2013-01-16 19:29:31 +0100217 cpu->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +0000218 tb_phys_invalidate(tb, -1);
219 tb_free(tb);
220}
221
/* Slow-path TB lookup: search the physical-address hash table for a TB
 * matching (pc, cs_base, flags), translating the guest code if no
 * cached translation exists.  The found/created TB is moved to the head
 * of its hash chain and installed in the per-CPU virtual-PC jump cache.
 */
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed: a TB spanning a page boundary
             * only matches if its second physical page also matches */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list (MRU ordering);
     * *ptb1 is NULL exactly when the TB was just generated and is
     * already at the head, so no relink is needed then. */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
278
Andreas Färber9349b4f2012-03-14 01:38:32 +0100279static inline TranslationBlock *tb_find_fast(CPUArchState *env)
bellard8a40a182005-11-20 10:35:40 +0000280{
Andreas Färber8cd70432013-08-26 06:03:38 +0200281 CPUState *cpu = ENV_GET_CPU(env);
bellard8a40a182005-11-20 10:35:40 +0000282 TranslationBlock *tb;
283 target_ulong cs_base, pc;
aliguori6b917542008-11-18 19:46:41 +0000284 int flags;
bellard8a40a182005-11-20 10:35:40 +0000285
286 /* we record a subset of the CPU state. It will
287 always be the same before a given translated block
288 is executed. */
aliguori6b917542008-11-18 19:46:41 +0000289 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
Andreas Färber8cd70432013-08-26 06:03:38 +0200290 tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
ths551bd272008-07-03 17:57:36 +0000291 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
292 tb->flags != flags)) {
Blue Swirlcea5f9a2011-05-15 16:03:25 +0000293 tb = tb_find_slow(env, pc, cs_base, flags);
bellard8a40a182005-11-20 10:35:40 +0000294 }
295 return tb;
296}
297
/* Handle an EXCP_DEBUG exit from the execution loop: if the exit was
 * not caused by a watchpoint, clear any stale BP_WATCHPOINT_HIT flags,
 * then invoke the per-CPU-class debug exception hook.
 */
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}
312
bellard7d132992003-03-06 23:23:54 +0000313/* main execution loop */
314
Marcelo Tosatti1a28cac2010-05-04 09:45:20 -0300315volatile sig_atomic_t exit_request;
316
Andreas Färber9349b4f2012-03-14 01:38:32 +0100317int cpu_exec(CPUArchState *env)
bellard7d132992003-03-06 23:23:54 +0000318{
Andreas Färberc356a1b2012-05-04 19:39:23 +0200319 CPUState *cpu = ENV_GET_CPU(env);
Andreas Färber97a8ea52013-02-02 10:57:51 +0100320 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber693fa552013-12-24 03:18:12 +0100321#ifdef TARGET_I386
322 X86CPU *x86_cpu = X86_CPU(cpu);
323#endif
bellard8a40a182005-11-20 10:35:40 +0000324 int ret, interrupt_request;
bellard8a40a182005-11-20 10:35:40 +0000325 TranslationBlock *tb;
bellardc27004e2005-01-03 23:35:10 +0000326 uint8_t *tc_ptr;
Richard Henderson3e9bd632013-08-20 14:40:25 -0700327 uintptr_t next_tb;
Sebastian Tanasec2aa5f82014-07-25 11:56:31 +0200328 SyncClocks sc;
329
Peter Maydellbae2c272014-04-04 17:42:56 +0100330 /* This must be volatile so it is not trashed by longjmp() */
331 volatile bool have_tb_lock = false;
bellard8c6939c2003-06-09 15:28:00 +0000332
Andreas Färber259186a2013-01-17 18:51:17 +0100333 if (cpu->halted) {
Andreas Färber3993c6b2012-05-03 06:43:49 +0200334 if (!cpu_has_work(cpu)) {
Paolo Bonzinieda48c32011-03-12 17:43:56 +0100335 return EXCP_HALTED;
336 }
337
Andreas Färber259186a2013-01-17 18:51:17 +0100338 cpu->halted = 0;
Paolo Bonzinieda48c32011-03-12 17:43:56 +0100339 }
bellard5a1e3cf2005-11-23 21:02:53 +0000340
Andreas Färber4917cf42013-05-27 05:17:50 +0200341 current_cpu = cpu;
bellarde4533c72003-06-15 19:51:39 +0000342
Andreas Färber4917cf42013-05-27 05:17:50 +0200343 /* As long as current_cpu is null, up to the assignment just above,
Olivier Hainqueec9bd892013-04-09 18:06:54 +0200344 * requests by other threads to exit the execution loop are expected to
345 * be issued using the exit_request global. We must make sure that our
Andreas Färber4917cf42013-05-27 05:17:50 +0200346 * evaluation of the global value is performed past the current_cpu
Olivier Hainqueec9bd892013-04-09 18:06:54 +0200347 * value transition point, which requires a memory barrier as well as
348 * an instruction scheduling constraint on modern architectures. */
349 smp_mb();
350
Jan Kiszkac629a4b2010-06-25 16:56:52 +0200351 if (unlikely(exit_request)) {
Andreas Färberfcd7d002012-12-17 08:02:44 +0100352 cpu->exit_request = 1;
Marcelo Tosatti1a28cac2010-05-04 09:45:20 -0300353 }
354
thsecb644f2007-06-03 18:45:53 +0000355#if defined(TARGET_I386)
Jan Kiszka6792a572011-02-07 12:19:18 +0100356 /* put eflags in CPU temporary format */
357 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
liguang80cf2c82013-05-28 16:21:08 +0800358 env->df = 1 - (2 * ((env->eflags >> 10) & 1));
Jan Kiszka6792a572011-02-07 12:19:18 +0100359 CC_OP = CC_OP_EFLAGS;
360 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
bellard93ac68b2003-09-30 20:57:29 +0000361#elif defined(TARGET_SPARC)
pbrooke6e59062006-10-22 00:18:54 +0000362#elif defined(TARGET_M68K)
363 env->cc_op = CC_OP_FLAGS;
364 env->cc_dest = env->sr & 0xf;
365 env->cc_x = (env->sr >> 4) & 1;
thsecb644f2007-06-03 18:45:53 +0000366#elif defined(TARGET_ALPHA)
367#elif defined(TARGET_ARM)
Guan Xuetaod2fbca92011-04-12 16:27:03 +0800368#elif defined(TARGET_UNICORE32)
thsecb644f2007-06-03 18:45:53 +0000369#elif defined(TARGET_PPC)
Elie Richa4e85f822011-07-22 05:58:39 +0000370 env->reserve_addr = -1;
Michael Walle81ea0e12011-02-17 23:45:02 +0100371#elif defined(TARGET_LM32)
Edgar E. Iglesiasb779e292009-05-20 21:31:33 +0200372#elif defined(TARGET_MICROBLAZE)
bellard6af0bf92005-07-02 14:58:51 +0000373#elif defined(TARGET_MIPS)
Anthony Greend15a9c22013-03-18 15:49:25 -0400374#elif defined(TARGET_MOXIE)
Jia Liue67db062012-07-20 15:50:39 +0800375#elif defined(TARGET_OPENRISC)
bellardfdf9b3e2006-04-27 21:07:38 +0000376#elif defined(TARGET_SH4)
thsf1ccf902007-10-08 13:16:14 +0000377#elif defined(TARGET_CRIS)
Alexander Graf10ec5112009-12-05 12:44:21 +0100378#elif defined(TARGET_S390X)
Max Filippov23288262011-09-06 03:55:25 +0400379#elif defined(TARGET_XTENSA)
Bastian Koppelmann48e06fe2014-09-01 12:59:46 +0100380#elif defined(TARGET_TRICORE)
bellardfdf9b3e2006-04-27 21:07:38 +0000381 /* XXXXX */
bellarde4533c72003-06-15 19:51:39 +0000382#endif
Richard Hendersoncffe7b32014-09-13 09:45:12 -0700383 cc->cpu_exec_enter(cpu);
Andreas Färber27103422013-08-26 08:31:06 +0200384 cpu->exception_index = -1;
bellard9d27abd2003-05-10 13:13:54 +0000385
Sebastian Tanasec2aa5f82014-07-25 11:56:31 +0200386 /* Calculate difference between guest clock and host clock.
387 * This delay includes the delay of the last cycle, so
388 * what we have to do is sleep until it is 0. As for the
389 * advance/delay we gain here, we try to fix it next time.
390 */
391 init_delay_params(&sc, cpu);
392
bellard7d132992003-03-06 23:23:54 +0000393 /* prepare setjmp context for exception handling */
bellard3fb2ded2003-06-24 13:22:59 +0000394 for(;;) {
Andreas Färber6f03bef2013-08-26 06:22:03 +0200395 if (sigsetjmp(cpu->jmp_env, 0) == 0) {
bellard3fb2ded2003-06-24 13:22:59 +0000396 /* if an exception is pending, we execute it here */
Andreas Färber27103422013-08-26 08:31:06 +0200397 if (cpu->exception_index >= 0) {
398 if (cpu->exception_index >= EXCP_INTERRUPT) {
bellard3fb2ded2003-06-24 13:22:59 +0000399 /* exit request from the cpu execution loop */
Andreas Färber27103422013-08-26 08:31:06 +0200400 ret = cpu->exception_index;
Jan Kiszka1009d2e2011-03-15 12:26:13 +0100401 if (ret == EXCP_DEBUG) {
402 cpu_handle_debug_exception(env);
403 }
bellard3fb2ded2003-06-24 13:22:59 +0000404 break;
aurel3272d239e2009-01-14 19:40:27 +0000405 } else {
406#if defined(CONFIG_USER_ONLY)
bellard3fb2ded2003-06-24 13:22:59 +0000407 /* if user mode only, we simulate a fake exception
ths9f083492006-12-07 18:28:42 +0000408 which will be handled outside the cpu execution
bellard3fb2ded2003-06-24 13:22:59 +0000409 loop */
bellard83479e72003-06-25 16:12:37 +0000410#if defined(TARGET_I386)
Andreas Färber97a8ea52013-02-02 10:57:51 +0100411 cc->do_interrupt(cpu);
bellard83479e72003-06-25 16:12:37 +0000412#endif
Andreas Färber27103422013-08-26 08:31:06 +0200413 ret = cpu->exception_index;
bellard3fb2ded2003-06-24 13:22:59 +0000414 break;
aurel3272d239e2009-01-14 19:40:27 +0000415#else
Andreas Färber97a8ea52013-02-02 10:57:51 +0100416 cc->do_interrupt(cpu);
Andreas Färber27103422013-08-26 08:31:06 +0200417 cpu->exception_index = -1;
aurel3272d239e2009-01-14 19:40:27 +0000418#endif
bellard3fb2ded2003-06-24 13:22:59 +0000419 }
ths5fafdf22007-09-16 21:08:06 +0000420 }
bellard9df217a2005-02-10 22:05:51 +0000421
blueswir1b5fc09a2008-05-04 06:38:18 +0000422 next_tb = 0; /* force lookup of first TB */
bellard3fb2ded2003-06-24 13:22:59 +0000423 for(;;) {
Andreas Färber259186a2013-01-17 18:51:17 +0100424 interrupt_request = cpu->interrupt_request;
malce1638bd2008-11-06 18:54:46 +0000425 if (unlikely(interrupt_request)) {
Andreas Färbered2803d2013-06-21 20:20:45 +0200426 if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
malce1638bd2008-11-06 18:54:46 +0000427 /* Mask out external interrupts for this step. */
Richard Henderson3125f762011-05-04 13:34:25 -0700428 interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
malce1638bd2008-11-06 18:54:46 +0000429 }
pbrook6658ffb2007-03-16 23:58:11 +0000430 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
Andreas Färber259186a2013-01-17 18:51:17 +0100431 cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
Andreas Färber27103422013-08-26 08:31:06 +0200432 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +0200433 cpu_loop_exit(cpu);
pbrook6658ffb2007-03-16 23:58:11 +0000434 }
balroga90b7312007-05-01 01:28:01 +0000435#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
Edgar E. Iglesiasb779e292009-05-20 21:31:33 +0200436 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
Bastian Koppelmann48e06fe2014-09-01 12:59:46 +0100437 defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || \
438 defined(TARGET_UNICORE32) || defined(TARGET_TRICORE)
balroga90b7312007-05-01 01:28:01 +0000439 if (interrupt_request & CPU_INTERRUPT_HALT) {
Andreas Färber259186a2013-01-17 18:51:17 +0100440 cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
441 cpu->halted = 1;
Andreas Färber27103422013-08-26 08:31:06 +0200442 cpu->exception_index = EXCP_HLT;
Andreas Färber5638d182013-08-27 17:52:12 +0200443 cpu_loop_exit(cpu);
balroga90b7312007-05-01 01:28:01 +0000444 }
445#endif
bellard68a79312003-06-30 13:12:32 +0000446#if defined(TARGET_I386)
Paolo Bonzini4a92a552013-03-05 15:35:17 +0100447 if (interrupt_request & CPU_INTERRUPT_INIT) {
448 cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
449 do_cpu_init(x86_cpu);
450 cpu->exception_index = EXCP_HALTED;
451 cpu_loop_exit(cpu);
452 }
453#else
454 if (interrupt_request & CPU_INTERRUPT_RESET) {
455 cpu_reset(cpu);
456 }
457#endif
458#if defined(TARGET_I386)
Jan Kiszka5d62c432012-07-09 16:42:32 +0200459#if !defined(CONFIG_USER_ONLY)
460 if (interrupt_request & CPU_INTERRUPT_POLL) {
Andreas Färber259186a2013-01-17 18:51:17 +0100461 cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
Andreas Färber693fa552013-12-24 03:18:12 +0100462 apic_poll_irq(x86_cpu->apic_state);
Jan Kiszka5d62c432012-07-09 16:42:32 +0200463 }
464#endif
Paolo Bonzini4a92a552013-03-05 15:35:17 +0100465 if (interrupt_request & CPU_INTERRUPT_SIPI) {
Andreas Färber693fa552013-12-24 03:18:12 +0100466 do_cpu_sipi(x86_cpu);
Gleb Natapovb09ea7d2009-06-17 23:26:59 +0300467 } else if (env->hflags2 & HF2_GIF_MASK) {
bellarddb620f42008-06-04 17:02:19 +0000468 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
469 !(env->hflags & HF_SMM_MASK)) {
Blue Swirl77b2bc22012-04-28 19:35:10 +0000470 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
471 0);
Andreas Färber259186a2013-01-17 18:51:17 +0100472 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
Andreas Färber693fa552013-12-24 03:18:12 +0100473 do_smm_enter(x86_cpu);
bellarddb620f42008-06-04 17:02:19 +0000474 next_tb = 0;
475 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
476 !(env->hflags2 & HF2_NMI_MASK)) {
Andreas Färber259186a2013-01-17 18:51:17 +0100477 cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
bellarddb620f42008-06-04 17:02:19 +0000478 env->hflags2 |= HF2_NMI_MASK;
Blue Swirle694d4e2011-05-16 19:38:48 +0000479 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
bellarddb620f42008-06-04 17:02:19 +0000480 next_tb = 0;
陳韋任e965fc32012-02-06 14:02:55 +0800481 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
Andreas Färber259186a2013-01-17 18:51:17 +0100482 cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
Blue Swirle694d4e2011-05-16 19:38:48 +0000483 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
Huang Ying79c4f6b2009-06-23 10:05:14 +0800484 next_tb = 0;
bellarddb620f42008-06-04 17:02:19 +0000485 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
486 (((env->hflags2 & HF2_VINTR_MASK) &&
487 (env->hflags2 & HF2_HIF_MASK)) ||
488 (!(env->hflags2 & HF2_VINTR_MASK) &&
489 (env->eflags & IF_MASK &&
490 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
491 int intno;
Blue Swirl77b2bc22012-04-28 19:35:10 +0000492 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
493 0);
Andreas Färber259186a2013-01-17 18:51:17 +0100494 cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
495 CPU_INTERRUPT_VIRQ);
bellarddb620f42008-06-04 17:02:19 +0000496 intno = cpu_get_pic_interrupt(env);
malc4f213872012-08-27 18:33:12 +0400497 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
498 do_interrupt_x86_hardirq(env, intno, 1);
499 /* ensure that no TB jump will be modified as
500 the program flow was changed */
501 next_tb = 0;
ths0573fbf2007-09-23 15:28:04 +0000502#if !defined(CONFIG_USER_ONLY)
bellarddb620f42008-06-04 17:02:19 +0000503 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
504 (env->eflags & IF_MASK) &&
505 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
506 int intno;
507 /* FIXME: this should respect TPR */
Blue Swirl77b2bc22012-04-28 19:35:10 +0000508 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
509 0);
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +0100510 intno = ldl_phys(cpu->as,
511 env->vm_vmcb
512 + offsetof(struct vmcb,
513 control.int_vector));
aliguori93fcfe32009-01-15 22:34:14 +0000514 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
Blue Swirle694d4e2011-05-16 19:38:48 +0000515 do_interrupt_x86_hardirq(env, intno, 1);
Andreas Färber259186a2013-01-17 18:51:17 +0100516 cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
bellarddb620f42008-06-04 17:02:19 +0000517 next_tb = 0;
ths0573fbf2007-09-23 15:28:04 +0000518#endif
bellarddb620f42008-06-04 17:02:19 +0000519 }
bellard68a79312003-06-30 13:12:32 +0000520 }
bellardce097762004-01-04 23:53:18 +0000521#elif defined(TARGET_PPC)
j_mayer47103572007-03-30 09:38:04 +0000522 if (interrupt_request & CPU_INTERRUPT_HARD) {
j_mayere9df0142007-04-09 22:45:36 +0000523 ppc_hw_interrupt(env);
Andreas Färber259186a2013-01-17 18:51:17 +0100524 if (env->pending_interrupts == 0) {
525 cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
526 }
blueswir1b5fc09a2008-05-04 06:38:18 +0000527 next_tb = 0;
bellardce097762004-01-04 23:53:18 +0000528 }
Michael Walle81ea0e12011-02-17 23:45:02 +0100529#elif defined(TARGET_LM32)
530 if ((interrupt_request & CPU_INTERRUPT_HARD)
531 && (env->ie & IE_IE)) {
Andreas Färber27103422013-08-26 08:31:06 +0200532 cpu->exception_index = EXCP_IRQ;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100533 cc->do_interrupt(cpu);
Michael Walle81ea0e12011-02-17 23:45:02 +0100534 next_tb = 0;
535 }
Edgar E. Iglesiasb779e292009-05-20 21:31:33 +0200536#elif defined(TARGET_MICROBLAZE)
537 if ((interrupt_request & CPU_INTERRUPT_HARD)
538 && (env->sregs[SR_MSR] & MSR_IE)
539 && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
540 && !(env->iflags & (D_FLAG | IMM_FLAG))) {
Andreas Färber27103422013-08-26 08:31:06 +0200541 cpu->exception_index = EXCP_IRQ;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100542 cc->do_interrupt(cpu);
Edgar E. Iglesiasb779e292009-05-20 21:31:33 +0200543 next_tb = 0;
544 }
bellard6af0bf92005-07-02 14:58:51 +0000545#elif defined(TARGET_MIPS)
546 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
Aurelien Jarno4cdc1cd2010-12-25 22:56:32 +0100547 cpu_mips_hw_interrupts_pending(env)) {
bellard6af0bf92005-07-02 14:58:51 +0000548 /* Raise it */
Andreas Färber27103422013-08-26 08:31:06 +0200549 cpu->exception_index = EXCP_EXT_INTERRUPT;
bellard6af0bf92005-07-02 14:58:51 +0000550 env->error_code = 0;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100551 cc->do_interrupt(cpu);
blueswir1b5fc09a2008-05-04 06:38:18 +0000552 next_tb = 0;
bellard6af0bf92005-07-02 14:58:51 +0000553 }
Bastian Koppelmann48e06fe2014-09-01 12:59:46 +0100554#elif defined(TARGET_TRICORE)
555 if ((interrupt_request & CPU_INTERRUPT_HARD)) {
556 cc->do_interrupt(cpu);
557 next_tb = 0;
558 }
559
Jia Liub6a71ef2012-07-20 15:50:41 +0800560#elif defined(TARGET_OPENRISC)
561 {
562 int idx = -1;
563 if ((interrupt_request & CPU_INTERRUPT_HARD)
564 && (env->sr & SR_IEE)) {
565 idx = EXCP_INT;
566 }
567 if ((interrupt_request & CPU_INTERRUPT_TIMER)
568 && (env->sr & SR_TEE)) {
569 idx = EXCP_TICK;
570 }
571 if (idx >= 0) {
Andreas Färber27103422013-08-26 08:31:06 +0200572 cpu->exception_index = idx;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100573 cc->do_interrupt(cpu);
Jia Liub6a71ef2012-07-20 15:50:41 +0800574 next_tb = 0;
575 }
576 }
bellarde95c8d52004-09-30 22:22:08 +0000577#elif defined(TARGET_SPARC)
Igor V. Kovalenkod532b262010-01-07 23:28:31 +0300578 if (interrupt_request & CPU_INTERRUPT_HARD) {
579 if (cpu_interrupts_enabled(env) &&
580 env->interrupt_index > 0) {
581 int pil = env->interrupt_index & 0xf;
582 int type = env->interrupt_index & 0xf0;
bellard66321a12005-04-06 20:47:48 +0000583
Igor V. Kovalenkod532b262010-01-07 23:28:31 +0300584 if (((type == TT_EXTINT) &&
585 cpu_pil_allowed(env, pil)) ||
586 type != TT_EXTINT) {
Andreas Färber27103422013-08-26 08:31:06 +0200587 cpu->exception_index = env->interrupt_index;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100588 cc->do_interrupt(cpu);
Igor V. Kovalenkod532b262010-01-07 23:28:31 +0300589 next_tb = 0;
590 }
591 }
陳韋任e965fc32012-02-06 14:02:55 +0800592 }
bellardb5ff1b32005-11-26 10:38:39 +0000593#elif defined(TARGET_ARM)
594 if (interrupt_request & CPU_INTERRUPT_FIQ
Peter Maydell4cc35612014-02-26 17:20:06 +0000595 && !(env->daif & PSTATE_F)) {
Andreas Färber27103422013-08-26 08:31:06 +0200596 cpu->exception_index = EXCP_FIQ;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100597 cc->do_interrupt(cpu);
blueswir1b5fc09a2008-05-04 06:38:18 +0000598 next_tb = 0;
bellardb5ff1b32005-11-26 10:38:39 +0000599 }
pbrook9ee6e8b2007-11-11 00:04:49 +0000600 /* ARMv7-M interrupt return works by loading a magic value
601 into the PC. On real hardware the load causes the
602 return to occur. The qemu implementation performs the
603 jump normally, then does the exception return when the
604 CPU tries to execute code at the magic address.
605 This will cause the magic PC value to be pushed to
Stefan Weila1c72732011-04-28 17:20:38 +0200606 the stack if an interrupt occurred at the wrong time.
pbrook9ee6e8b2007-11-11 00:04:49 +0000607 We avoid this by disabling interrupts when
608 pc contains a magic address. */
bellardb5ff1b32005-11-26 10:38:39 +0000609 if (interrupt_request & CPU_INTERRUPT_HARD
David Hooverc3c8d6b2014-09-12 14:06:47 +0100610 && !(env->daif & PSTATE_I)
611 && (!IS_M(env) || env->regs[15] < 0xfffffff0)) {
Andreas Färber27103422013-08-26 08:31:06 +0200612 cpu->exception_index = EXCP_IRQ;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100613 cc->do_interrupt(cpu);
blueswir1b5fc09a2008-05-04 06:38:18 +0000614 next_tb = 0;
bellardb5ff1b32005-11-26 10:38:39 +0000615 }
Guan Xuetaod2fbca92011-04-12 16:27:03 +0800616#elif defined(TARGET_UNICORE32)
617 if (interrupt_request & CPU_INTERRUPT_HARD
618 && !(env->uncached_asr & ASR_I)) {
Andreas Färber27103422013-08-26 08:31:06 +0200619 cpu->exception_index = UC32_EXCP_INTR;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100620 cc->do_interrupt(cpu);
Guan Xuetaod2fbca92011-04-12 16:27:03 +0800621 next_tb = 0;
622 }
bellardfdf9b3e2006-04-27 21:07:38 +0000623#elif defined(TARGET_SH4)
thse96e2042007-12-02 06:18:24 +0000624 if (interrupt_request & CPU_INTERRUPT_HARD) {
Andreas Färber97a8ea52013-02-02 10:57:51 +0100625 cc->do_interrupt(cpu);
blueswir1b5fc09a2008-05-04 06:38:18 +0000626 next_tb = 0;
thse96e2042007-12-02 06:18:24 +0000627 }
j_mayereddf68a2007-04-05 07:22:49 +0000628#elif defined(TARGET_ALPHA)
Richard Henderson6a80e082011-04-18 15:09:09 -0700629 {
630 int idx = -1;
631 /* ??? This hard-codes the OSF/1 interrupt levels. */
陳韋任e965fc32012-02-06 14:02:55 +0800632 switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
Richard Henderson6a80e082011-04-18 15:09:09 -0700633 case 0 ... 3:
634 if (interrupt_request & CPU_INTERRUPT_HARD) {
635 idx = EXCP_DEV_INTERRUPT;
636 }
637 /* FALLTHRU */
638 case 4:
639 if (interrupt_request & CPU_INTERRUPT_TIMER) {
640 idx = EXCP_CLK_INTERRUPT;
641 }
642 /* FALLTHRU */
643 case 5:
644 if (interrupt_request & CPU_INTERRUPT_SMP) {
645 idx = EXCP_SMP_INTERRUPT;
646 }
647 /* FALLTHRU */
648 case 6:
649 if (interrupt_request & CPU_INTERRUPT_MCHK) {
650 idx = EXCP_MCHK;
651 }
652 }
653 if (idx >= 0) {
Andreas Färber27103422013-08-26 08:31:06 +0200654 cpu->exception_index = idx;
Richard Henderson6a80e082011-04-18 15:09:09 -0700655 env->error_code = 0;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100656 cc->do_interrupt(cpu);
Richard Henderson6a80e082011-04-18 15:09:09 -0700657 next_tb = 0;
658 }
j_mayereddf68a2007-04-05 07:22:49 +0000659 }
thsf1ccf902007-10-08 13:16:14 +0000660#elif defined(TARGET_CRIS)
edgar_igl1b1a38b2008-06-09 23:18:06 +0000661 if (interrupt_request & CPU_INTERRUPT_HARD
Edgar E. Iglesiasfb9fb692010-02-15 11:17:33 +0100662 && (env->pregs[PR_CCS] & I_FLAG)
663 && !env->locked_irq) {
Andreas Färber27103422013-08-26 08:31:06 +0200664 cpu->exception_index = EXCP_IRQ;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100665 cc->do_interrupt(cpu);
edgar_igl1b1a38b2008-06-09 23:18:06 +0000666 next_tb = 0;
667 }
Lars Persson82193142012-06-14 16:23:55 +0200668 if (interrupt_request & CPU_INTERRUPT_NMI) {
669 unsigned int m_flag_archval;
670 if (env->pregs[PR_VR] < 32) {
671 m_flag_archval = M_FLAG_V10;
672 } else {
673 m_flag_archval = M_FLAG_V32;
674 }
675 if ((env->pregs[PR_CCS] & m_flag_archval)) {
Andreas Färber27103422013-08-26 08:31:06 +0200676 cpu->exception_index = EXCP_NMI;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100677 cc->do_interrupt(cpu);
Lars Persson82193142012-06-14 16:23:55 +0200678 next_tb = 0;
679 }
thsf1ccf902007-10-08 13:16:14 +0000680 }
pbrook06338792007-05-23 19:58:11 +0000681#elif defined(TARGET_M68K)
682 if (interrupt_request & CPU_INTERRUPT_HARD
683 && ((env->sr & SR_I) >> SR_I_SHIFT)
684 < env->pending_level) {
685 /* Real hardware gets the interrupt vector via an
686 IACK cycle at this point. Current emulated
687 hardware doesn't rely on this, so we
688 provide/save the vector when the interrupt is
689 first signalled. */
Andreas Färber27103422013-08-26 08:31:06 +0200690 cpu->exception_index = env->pending_vector;
Blue Swirl3c688822011-05-21 07:55:24 +0000691 do_interrupt_m68k_hardirq(env);
blueswir1b5fc09a2008-05-04 06:38:18 +0000692 next_tb = 0;
pbrook06338792007-05-23 19:58:11 +0000693 }
Alexander Graf3110e292011-04-15 17:32:48 +0200694#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
695 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
696 (env->psw.mask & PSW_MASK_EXT)) {
Andreas Färber97a8ea52013-02-02 10:57:51 +0100697 cc->do_interrupt(cpu);
Alexander Graf3110e292011-04-15 17:32:48 +0200698 next_tb = 0;
699 }
Max Filippov40643d72011-09-06 03:55:41 +0400700#elif defined(TARGET_XTENSA)
701 if (interrupt_request & CPU_INTERRUPT_HARD) {
Andreas Färber27103422013-08-26 08:31:06 +0200702 cpu->exception_index = EXC_IRQ;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100703 cc->do_interrupt(cpu);
Max Filippov40643d72011-09-06 03:55:41 +0400704 next_tb = 0;
705 }
bellard68a79312003-06-30 13:12:32 +0000706#endif
Stefan Weilff2712b2011-04-28 17:20:35 +0200707 /* Don't use the cached interrupt_request value,
bellard9d050952006-05-22 22:03:52 +0000708 do_interrupt may have updated the EXITTB flag. */
Andreas Färber259186a2013-01-17 18:51:17 +0100709 if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
710 cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
bellardbf3e8bf2004-02-16 21:58:54 +0000711 /* ensure that no TB jump will be modified as
712 the program flow was changed */
blueswir1b5fc09a2008-05-04 06:38:18 +0000713 next_tb = 0;
bellardbf3e8bf2004-02-16 21:58:54 +0000714 }
aurel32be214e62009-03-06 21:48:00 +0000715 }
Andreas Färberfcd7d002012-12-17 08:02:44 +0100716 if (unlikely(cpu->exit_request)) {
717 cpu->exit_request = 0;
Andreas Färber27103422013-08-26 08:31:06 +0200718 cpu->exception_index = EXCP_INTERRUPT;
Andreas Färber5638d182013-08-27 17:52:12 +0200719 cpu_loop_exit(cpu);
bellard3fb2ded2003-06-24 13:22:59 +0000720 }
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +0700721 spin_lock(&tcg_ctx.tb_ctx.tb_lock);
Peter Maydellbae2c272014-04-04 17:42:56 +0100722 have_tb_lock = true;
Blue Swirlcea5f9a2011-05-15 16:03:25 +0000723 tb = tb_find_fast(env);
pbrookd5975362008-06-07 20:50:51 +0000724 /* Note: we do it here to avoid a gcc bug on Mac OS X when
725 doing it in tb_find_slow */
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +0700726 if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
pbrookd5975362008-06-07 20:50:51 +0000727 /* as some TB could have been invalidated because
728 of memory exceptions while generating the code, we
729 must recompute the hash index here */
730 next_tb = 0;
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +0700731 tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
pbrookd5975362008-06-07 20:50:51 +0000732 }
Peter Maydellc30d1ae2013-04-11 21:21:46 +0100733 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
734 qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
735 tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
736 }
bellard8a40a182005-11-20 10:35:40 +0000737 /* see if we can patch the calling TB. When the TB
738 spans two pages, we cannot safely do a direct
739 jump. */
Paolo Bonzini040f2fb2010-01-15 08:56:36 +0100740 if (next_tb != 0 && tb->page_addr[1] == -1) {
Peter Maydell09800112013-02-22 18:10:00 +0000741 tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
742 next_tb & TB_EXIT_MASK, tb);
bellard3fb2ded2003-06-24 13:22:59 +0000743 }
Peter Maydellbae2c272014-04-04 17:42:56 +0100744 have_tb_lock = false;
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +0700745 spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
malc55e8b852008-11-04 14:18:13 +0000746
747 /* cpu_interrupt might be called while translating the
748 TB, but before it is linked into a potentially
749 infinite loop and becomes env->current_tb. Avoid
750 starting execution if there is a pending interrupt. */
Andreas Färberd77953b2013-01-16 19:29:31 +0100751 cpu->current_tb = tb;
Jan Kiszkab0052d12010-06-25 16:56:50 +0200752 barrier();
Andreas Färberfcd7d002012-12-17 08:02:44 +0100753 if (likely(!cpu->exit_request)) {
Alex Bennée6db8b532014-08-01 17:08:57 +0100754 trace_exec_tb(tb, tb->pc);
pbrook2e70f6e2008-06-29 01:03:05 +0000755 tc_ptr = tb->tc_ptr;
陳韋任e965fc32012-02-06 14:02:55 +0800756 /* execute the generated code */
Peter Maydell77211372013-02-22 18:10:02 +0000757 next_tb = cpu_tb_exec(cpu, tc_ptr);
Peter Maydell378df4b2013-02-22 18:10:03 +0000758 switch (next_tb & TB_EXIT_MASK) {
759 case TB_EXIT_REQUESTED:
760 /* Something asked us to stop executing
761 * chained TBs; just continue round the main
762 * loop. Whatever requested the exit will also
763 * have set something else (eg exit_request or
764 * interrupt_request) which we will handle
765 * next time around the loop.
766 */
767 tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
768 next_tb = 0;
769 break;
770 case TB_EXIT_ICOUNT_EXPIRED:
771 {
thsbf20dc02008-06-30 17:22:19 +0000772 /* Instruction counter expired. */
pbrook2e70f6e2008-06-29 01:03:05 +0000773 int insns_left;
Peter Maydell09800112013-02-22 18:10:00 +0000774 tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
Andreas Färber28ecfd72013-08-26 05:51:49 +0200775 insns_left = cpu->icount_decr.u32;
Andreas Färberefee7342013-08-26 05:39:29 +0200776 if (cpu->icount_extra && insns_left >= 0) {
pbrook2e70f6e2008-06-29 01:03:05 +0000777 /* Refill decrementer and continue execution. */
Andreas Färberefee7342013-08-26 05:39:29 +0200778 cpu->icount_extra += insns_left;
779 if (cpu->icount_extra > 0xffff) {
pbrook2e70f6e2008-06-29 01:03:05 +0000780 insns_left = 0xffff;
781 } else {
Andreas Färberefee7342013-08-26 05:39:29 +0200782 insns_left = cpu->icount_extra;
pbrook2e70f6e2008-06-29 01:03:05 +0000783 }
Andreas Färberefee7342013-08-26 05:39:29 +0200784 cpu->icount_extra -= insns_left;
Andreas Färber28ecfd72013-08-26 05:51:49 +0200785 cpu->icount_decr.u16.low = insns_left;
pbrook2e70f6e2008-06-29 01:03:05 +0000786 } else {
787 if (insns_left > 0) {
788 /* Execute remaining instructions. */
Blue Swirlcea5f9a2011-05-15 16:03:25 +0000789 cpu_exec_nocache(env, insns_left, tb);
Sebastian Tanasec2aa5f82014-07-25 11:56:31 +0200790 align_clocks(&sc, cpu);
pbrook2e70f6e2008-06-29 01:03:05 +0000791 }
Andreas Färber27103422013-08-26 08:31:06 +0200792 cpu->exception_index = EXCP_INTERRUPT;
pbrook2e70f6e2008-06-29 01:03:05 +0000793 next_tb = 0;
Andreas Färber5638d182013-08-27 17:52:12 +0200794 cpu_loop_exit(cpu);
pbrook2e70f6e2008-06-29 01:03:05 +0000795 }
Peter Maydell378df4b2013-02-22 18:10:03 +0000796 break;
797 }
798 default:
799 break;
pbrook2e70f6e2008-06-29 01:03:05 +0000800 }
801 }
Andreas Färberd77953b2013-01-16 19:29:31 +0100802 cpu->current_tb = NULL;
Sebastian Tanasec2aa5f82014-07-25 11:56:31 +0200803 /* Try to align the host and virtual clocks
804 if the guest is in advance */
805 align_clocks(&sc, cpu);
bellard4cbf74b2003-08-10 21:48:43 +0000806 /* reset soft MMU for next block (it can currently
807 only be set by a memory fault) */
ths50a518e2007-06-03 18:52:15 +0000808 } /* for(;;) */
Jan Kiszka0d101932011-07-02 09:50:51 +0200809 } else {
810 /* Reload env after longjmp - the compiler may have smashed all
811 * local variables as longjmp is marked 'noreturn'. */
Andreas Färber4917cf42013-05-27 05:17:50 +0200812 cpu = current_cpu;
813 env = cpu->env_ptr;
Juergen Lock6c78f292013-10-03 16:09:37 +0200814#if !(defined(CONFIG_USER_ONLY) && \
815 (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
816 cc = CPU_GET_CLASS(cpu);
817#endif
Andreas Färber693fa552013-12-24 03:18:12 +0100818#ifdef TARGET_I386
819 x86_cpu = X86_CPU(cpu);
820#endif
Peter Maydellbae2c272014-04-04 17:42:56 +0100821 if (have_tb_lock) {
822 spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
823 have_tb_lock = false;
824 }
bellard7d132992003-03-06 23:23:54 +0000825 }
bellard3fb2ded2003-06-24 13:22:59 +0000826 } /* for(;;) */
827
bellard7d132992003-03-06 23:23:54 +0000828
bellarde4533c72003-06-15 19:51:39 +0000829#if defined(TARGET_I386)
bellard9de5e442003-03-23 16:49:39 +0000830 /* restore flags in standard format */
Blue Swirle694d4e2011-05-16 19:38:48 +0000831 env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
liguang80cf2c82013-05-28 16:21:08 +0800832 | (env->df & DF_MASK);
bellarde4533c72003-06-15 19:51:39 +0000833#elif defined(TARGET_ARM)
bellardb7bcbe92005-02-22 19:27:29 +0000834 /* XXX: Save/restore host fpu exception state?. */
Guan Xuetaod2fbca92011-04-12 16:27:03 +0800835#elif defined(TARGET_UNICORE32)
bellard93ac68b2003-09-30 20:57:29 +0000836#elif defined(TARGET_SPARC)
bellard67867302003-11-23 17:05:30 +0000837#elif defined(TARGET_PPC)
Michael Walle81ea0e12011-02-17 23:45:02 +0100838#elif defined(TARGET_LM32)
pbrooke6e59062006-10-22 00:18:54 +0000839#elif defined(TARGET_M68K)
840 cpu_m68k_flush_flags(env, env->cc_op);
841 env->cc_op = CC_OP_FLAGS;
842 env->sr = (env->sr & 0xffe0)
843 | env->cc_dest | (env->cc_x << 4);
Edgar E. Iglesiasb779e292009-05-20 21:31:33 +0200844#elif defined(TARGET_MICROBLAZE)
bellard6af0bf92005-07-02 14:58:51 +0000845#elif defined(TARGET_MIPS)
Bastian Koppelmann48e06fe2014-09-01 12:59:46 +0100846#elif defined(TARGET_TRICORE)
Anthony Greend15a9c22013-03-18 15:49:25 -0400847#elif defined(TARGET_MOXIE)
Jia Liue67db062012-07-20 15:50:39 +0800848#elif defined(TARGET_OPENRISC)
bellardfdf9b3e2006-04-27 21:07:38 +0000849#elif defined(TARGET_SH4)
j_mayereddf68a2007-04-05 07:22:49 +0000850#elif defined(TARGET_ALPHA)
thsf1ccf902007-10-08 13:16:14 +0000851#elif defined(TARGET_CRIS)
Alexander Graf10ec5112009-12-05 12:44:21 +0100852#elif defined(TARGET_S390X)
Max Filippov23288262011-09-06 03:55:25 +0400853#elif defined(TARGET_XTENSA)
bellardfdf9b3e2006-04-27 21:07:38 +0000854 /* XXXXX */
bellarde4533c72003-06-15 19:51:39 +0000855#endif
Richard Hendersoncffe7b32014-09-13 09:45:12 -0700856 cc->cpu_exec_exit(cpu);
pbrook1057eaa2007-02-04 13:37:44 +0000857
Andreas Färber4917cf42013-05-27 05:17:50 +0200858 /* fail safe : never use current_cpu outside cpu_exec() */
859 current_cpu = NULL;
bellard7d132992003-03-06 23:23:54 +0000860 return ret;
861}