blob: d930e7a49afc603ac85c2eb65cace9d6c2322b71 [file] [log] [blame]
bellard7d132992003-03-06 23:23:54 +00001/*
陳韋任e965fc32012-02-06 14:02:55 +08002 * emulator main execution loop
ths5fafdf22007-09-16 21:08:06 +00003 *
bellard66321a12005-04-06 20:47:48 +00004 * Copyright (c) 2003-2005 Fabrice Bellard
bellard7d132992003-03-06 23:23:54 +00005 *
bellard3ef693a2003-03-23 20:17:16 +00006 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
bellard7d132992003-03-06 23:23:54 +000010 *
bellard3ef693a2003-03-23 20:17:16 +000011 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
bellard7d132992003-03-06 23:23:54 +000015 *
bellard3ef693a2003-03-23 20:17:16 +000016 * You should have received a copy of the GNU Lesser General Public
Blue Swirl8167ee82009-07-16 20:47:01 +000017 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
bellard7d132992003-03-06 23:23:54 +000018 */
bellarde4533c72003-06-15 19:51:39 +000019#include "config.h"
Blue Swirlcea5f9a2011-05-15 16:03:25 +000020#include "cpu.h"
Alex Bennée6db8b532014-08-01 17:08:57 +010021#include "trace.h"
Paolo Bonzini76cad712012-10-24 11:12:21 +020022#include "disas/disas.h"
bellard7cb69ca2008-05-10 10:55:51 +000023#include "tcg.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010024#include "qemu/atomic.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010025#include "sysemu/qtest.h"
Sebastian Tanasec2aa5f82014-07-25 11:56:31 +020026#include "qemu/timer.h"
27
28/* -icount align implementation. */
29
/* State for keeping the guest virtual clock aligned with the host
 * real-time clock when -icount align is in use.
 */
typedef struct SyncClocks {
    int64_t diff_clk;        /* guest-ahead-of-host difference, in ns */
    int64_t last_cpu_icount; /* icount value at the previous alignment */
    int64_t realtime_clock;  /* QEMU_CLOCK_REALTIME snapshot, in ns */
} SyncClocks;
35
36#if !defined(CONFIG_USER_ONLY)
37/* Allow the guest to have a max 3ms advance.
38 * The difference between the 2 clocks could therefore
39 * oscillate around 0.
40 */
41#define VM_CLOCK_ADVANCE 3000000
Sebastian Tanase7f7bc142014-07-25 11:56:32 +020042#define THRESHOLD_REDUCE 1.5
43#define MAX_DELAY_PRINT_RATE 2000000000LL
44#define MAX_NB_PRINTS 100
Sebastian Tanasec2aa5f82014-07-25 11:56:31 +020045
/* Sleep on the host, if necessary, so that the guest does not run
 * ahead of the host clock by more than VM_CLOCK_ADVANCE ns.
 * No-op unless -icount align was requested on the command line.
 */
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    /* Remaining instruction budget for the current execution slice. */
    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    /* Convert the instructions executed since the last call into ns
     * and add them to the accumulated guest/host difference.
     */
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            /* Interrupted: credit only the time actually slept
             * (requested minus remaining).
             */
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        /* Windows Sleep() takes milliseconds and cannot report an
         * early wakeup, so assume the full delay elapsed.
         */
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
75
/* Rate-limited warning when the guest is running late (diff_clk < 0).
 * A message is printed at most every MAX_DELAY_PRINT_RATE ns and at
 * most MAX_NB_PRINTS times in total; within that, it is only
 * re-printed when the delay leaves the previously reported 1-second
 * window (grows past it or shrinks by more than THRESHOLD_REDUCE).
 */
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;      /* upper bound of last printed window, s */
    static int64_t last_realtime_clock; /* host time of the last print, ns */
    static int nb_prints;               /* messages printed so far */

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            /* Round the delay up to the next whole second. */
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}
97
/* Initialise the clock-alignment state at the start of cpu_exec():
 * snapshot the host clock, compute the current guest/host difference,
 * and update the global max_delay/max_advance statistics.
 * No-op unless -icount align was requested.
 */
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late.  We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
120#else
/* Stub: -icount clock alignment only applies to system emulation. */
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}
124
/* Stub: -icount clock alignment only applies to system emulation. */
static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
128#endif /* CONFIG USER ONLY */
bellard7d132992003-03-06 23:23:54 +0000129
/* Abandon the current translation block and unwind back to the
 * sigsetjmp() in cpu_exec()'s main loop.
 */
void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}
thsbfed01f2007-06-03 17:44:37 +0000135
bellardfbf9eeb2004-04-25 21:21:33 +0000136/* exit the current TB from a signal handler. The host registers are
137 restored in a state compatible with the CPU emulator
138 */
Blue Swirl9eff14f2011-05-21 08:42:35 +0000139#if defined(CONFIG_SOFTMMU)
/* Re-enter the main execution loop from a signal handler, discarding
 * any pending exception.  `puc' (the host signal context) is
 * currently unused.
 */
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
Blue Swirl9eff14f2011-05-21 08:42:35 +0000147#endif
bellardfbf9eeb2004-04-25 21:21:33 +0000148
/* Execute a TB, and fix up the CPU state afterwards if necessary.
 *
 * Returns the value the generated code left in the TCG return
 * register: the address of the last TB, with the reason for stopping
 * encoded in the low TB_EXIT_MASK bits.
 */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    /* Optionally dump CPU state before entering the TB. */
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    /* Run the generated host code for this TB. */
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
197
pbrook2e70f6e2008-06-29 01:03:05 +0000198/* Execute the code without caching the generated code. An interpreter
199 could be used if available. */
Andreas Färber9349b4f2012-03-14 01:38:32 +0100200static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
Blue Swirlcea5f9a2011-05-15 16:03:25 +0000201 TranslationBlock *orig_tb)
pbrook2e70f6e2008-06-29 01:03:05 +0000202{
Andreas Färberd77953b2013-01-16 19:29:31 +0100203 CPUState *cpu = ENV_GET_CPU(env);
pbrook2e70f6e2008-06-29 01:03:05 +0000204 TranslationBlock *tb;
205
206 /* Should never happen.
207 We only end up here when an existing TB is too long. */
208 if (max_cycles > CF_COUNT_MASK)
209 max_cycles = CF_COUNT_MASK;
210
Andreas Färber648f0342013-09-01 17:43:17 +0200211 tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
pbrook2e70f6e2008-06-29 01:03:05 +0000212 max_cycles);
Andreas Färberd77953b2013-01-16 19:29:31 +0100213 cpu->current_tb = tb;
pbrook2e70f6e2008-06-29 01:03:05 +0000214 /* execute the generated code */
Alex Bennée6db8b532014-08-01 17:08:57 +0100215 trace_exec_tb_nocache(tb, tb->pc);
Peter Maydell77211372013-02-22 18:10:02 +0000216 cpu_tb_exec(cpu, tb->tc_ptr);
Andreas Färberd77953b2013-01-16 19:29:31 +0100217 cpu->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +0000218 tb_phys_invalidate(tb, -1);
219 tb_free(tb);
220}
221
Andreas Färber9349b4f2012-03-14 01:38:32 +0100222static TranslationBlock *tb_find_slow(CPUArchState *env,
Blue Swirlcea5f9a2011-05-15 16:03:25 +0000223 target_ulong pc,
bellard8a40a182005-11-20 10:35:40 +0000224 target_ulong cs_base,
j_mayerc0686882007-09-20 22:47:42 +0000225 uint64_t flags)
bellard8a40a182005-11-20 10:35:40 +0000226{
Andreas Färber8cd70432013-08-26 06:03:38 +0200227 CPUState *cpu = ENV_GET_CPU(env);
bellard8a40a182005-11-20 10:35:40 +0000228 TranslationBlock *tb, **ptb1;
bellard8a40a182005-11-20 10:35:40 +0000229 unsigned int h;
Blue Swirl337fc752011-09-04 11:06:22 +0000230 tb_page_addr_t phys_pc, phys_page1;
Paul Brook41c1b1c2010-03-12 16:54:58 +0000231 target_ulong virt_page2;
ths3b46e622007-09-17 08:09:54 +0000232
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +0700233 tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
ths3b46e622007-09-17 08:09:54 +0000234
bellard8a40a182005-11-20 10:35:40 +0000235 /* find translated block using physical mappings */
Paul Brook41c1b1c2010-03-12 16:54:58 +0000236 phys_pc = get_page_addr_code(env, pc);
bellard8a40a182005-11-20 10:35:40 +0000237 phys_page1 = phys_pc & TARGET_PAGE_MASK;
bellard8a40a182005-11-20 10:35:40 +0000238 h = tb_phys_hash_func(phys_pc);
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +0700239 ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
bellard8a40a182005-11-20 10:35:40 +0000240 for(;;) {
241 tb = *ptb1;
242 if (!tb)
243 goto not_found;
ths5fafdf22007-09-16 21:08:06 +0000244 if (tb->pc == pc &&
bellard8a40a182005-11-20 10:35:40 +0000245 tb->page_addr[0] == phys_page1 &&
ths5fafdf22007-09-16 21:08:06 +0000246 tb->cs_base == cs_base &&
bellard8a40a182005-11-20 10:35:40 +0000247 tb->flags == flags) {
248 /* check next page if needed */
249 if (tb->page_addr[1] != -1) {
Blue Swirl337fc752011-09-04 11:06:22 +0000250 tb_page_addr_t phys_page2;
251
ths5fafdf22007-09-16 21:08:06 +0000252 virt_page2 = (pc & TARGET_PAGE_MASK) +
bellard8a40a182005-11-20 10:35:40 +0000253 TARGET_PAGE_SIZE;
Paul Brook41c1b1c2010-03-12 16:54:58 +0000254 phys_page2 = get_page_addr_code(env, virt_page2);
bellard8a40a182005-11-20 10:35:40 +0000255 if (tb->page_addr[1] == phys_page2)
256 goto found;
257 } else {
258 goto found;
259 }
260 }
261 ptb1 = &tb->phys_hash_next;
262 }
263 not_found:
pbrook2e70f6e2008-06-29 01:03:05 +0000264 /* if no translated code available, then translate it now */
Andreas Färber648f0342013-09-01 17:43:17 +0200265 tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
ths3b46e622007-09-17 08:09:54 +0000266
bellard8a40a182005-11-20 10:35:40 +0000267 found:
Kirill Batuzov2c90fe22010-12-02 16:12:46 +0300268 /* Move the last found TB to the head of the list */
269 if (likely(*ptb1)) {
270 *ptb1 = tb->phys_hash_next;
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +0700271 tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
272 tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
Kirill Batuzov2c90fe22010-12-02 16:12:46 +0300273 }
bellard8a40a182005-11-20 10:35:40 +0000274 /* we add the TB in the virtual pc hash table */
Andreas Färber8cd70432013-08-26 06:03:38 +0200275 cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
bellard8a40a182005-11-20 10:35:40 +0000276 return tb;
277}
278
Andreas Färber9349b4f2012-03-14 01:38:32 +0100279static inline TranslationBlock *tb_find_fast(CPUArchState *env)
bellard8a40a182005-11-20 10:35:40 +0000280{
Andreas Färber8cd70432013-08-26 06:03:38 +0200281 CPUState *cpu = ENV_GET_CPU(env);
bellard8a40a182005-11-20 10:35:40 +0000282 TranslationBlock *tb;
283 target_ulong cs_base, pc;
aliguori6b917542008-11-18 19:46:41 +0000284 int flags;
bellard8a40a182005-11-20 10:35:40 +0000285
286 /* we record a subset of the CPU state. It will
287 always be the same before a given translated block
288 is executed. */
aliguori6b917542008-11-18 19:46:41 +0000289 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
Andreas Färber8cd70432013-08-26 06:03:38 +0200290 tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
ths551bd272008-07-03 17:57:36 +0000291 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
292 tb->flags != flags)) {
Blue Swirlcea5f9a2011-05-15 16:03:25 +0000293 tb = tb_find_slow(env, pc, cs_base, flags);
bellard8a40a182005-11-20 10:35:40 +0000294 }
295 return tb;
296}
297
Andreas Färber9349b4f2012-03-14 01:38:32 +0100298static void cpu_handle_debug_exception(CPUArchState *env)
Jan Kiszka1009d2e2011-03-15 12:26:13 +0100299{
Andreas Färberff4700b2013-08-26 18:23:18 +0200300 CPUState *cpu = ENV_GET_CPU(env);
Peter Maydell86025ee2014-09-12 14:06:48 +0100301 CPUClass *cc = CPU_GET_CLASS(cpu);
Jan Kiszka1009d2e2011-03-15 12:26:13 +0100302 CPUWatchpoint *wp;
303
Andreas Färberff4700b2013-08-26 18:23:18 +0200304 if (!cpu->watchpoint_hit) {
305 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Jan Kiszka1009d2e2011-03-15 12:26:13 +0100306 wp->flags &= ~BP_WATCHPOINT_HIT;
307 }
308 }
Peter Maydell86025ee2014-09-12 14:06:48 +0100309
310 cc->debug_excp_handler(cpu);
Jan Kiszka1009d2e2011-03-15 12:26:13 +0100311}
312
bellard7d132992003-03-06 23:23:54 +0000313/* main execution loop */
314
Marcelo Tosatti1a28cac2010-05-04 09:45:20 -0300315volatile sig_atomic_t exit_request;
316
Andreas Färber9349b4f2012-03-14 01:38:32 +0100317int cpu_exec(CPUArchState *env)
bellard7d132992003-03-06 23:23:54 +0000318{
Andreas Färberc356a1b2012-05-04 19:39:23 +0200319 CPUState *cpu = ENV_GET_CPU(env);
Andreas Färber97a8ea52013-02-02 10:57:51 +0100320 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber693fa552013-12-24 03:18:12 +0100321#ifdef TARGET_I386
322 X86CPU *x86_cpu = X86_CPU(cpu);
323#endif
bellard8a40a182005-11-20 10:35:40 +0000324 int ret, interrupt_request;
bellard8a40a182005-11-20 10:35:40 +0000325 TranslationBlock *tb;
bellardc27004e2005-01-03 23:35:10 +0000326 uint8_t *tc_ptr;
Richard Henderson3e9bd632013-08-20 14:40:25 -0700327 uintptr_t next_tb;
Sebastian Tanasec2aa5f82014-07-25 11:56:31 +0200328 SyncClocks sc;
329
Peter Maydellbae2c272014-04-04 17:42:56 +0100330 /* This must be volatile so it is not trashed by longjmp() */
331 volatile bool have_tb_lock = false;
bellard8c6939c2003-06-09 15:28:00 +0000332
Andreas Färber259186a2013-01-17 18:51:17 +0100333 if (cpu->halted) {
Andreas Färber3993c6b2012-05-03 06:43:49 +0200334 if (!cpu_has_work(cpu)) {
Paolo Bonzinieda48c32011-03-12 17:43:56 +0100335 return EXCP_HALTED;
336 }
337
Andreas Färber259186a2013-01-17 18:51:17 +0100338 cpu->halted = 0;
Paolo Bonzinieda48c32011-03-12 17:43:56 +0100339 }
bellard5a1e3cf2005-11-23 21:02:53 +0000340
Andreas Färber4917cf42013-05-27 05:17:50 +0200341 current_cpu = cpu;
bellarde4533c72003-06-15 19:51:39 +0000342
Andreas Färber4917cf42013-05-27 05:17:50 +0200343 /* As long as current_cpu is null, up to the assignment just above,
Olivier Hainqueec9bd892013-04-09 18:06:54 +0200344 * requests by other threads to exit the execution loop are expected to
345 * be issued using the exit_request global. We must make sure that our
Andreas Färber4917cf42013-05-27 05:17:50 +0200346 * evaluation of the global value is performed past the current_cpu
Olivier Hainqueec9bd892013-04-09 18:06:54 +0200347 * value transition point, which requires a memory barrier as well as
348 * an instruction scheduling constraint on modern architectures. */
349 smp_mb();
350
Jan Kiszkac629a4b2010-06-25 16:56:52 +0200351 if (unlikely(exit_request)) {
Andreas Färberfcd7d002012-12-17 08:02:44 +0100352 cpu->exit_request = 1;
Marcelo Tosatti1a28cac2010-05-04 09:45:20 -0300353 }
354
Richard Hendersoncffe7b32014-09-13 09:45:12 -0700355 cc->cpu_exec_enter(cpu);
Andreas Färber27103422013-08-26 08:31:06 +0200356 cpu->exception_index = -1;
bellard9d27abd2003-05-10 13:13:54 +0000357
Sebastian Tanasec2aa5f82014-07-25 11:56:31 +0200358 /* Calculate difference between guest clock and host clock.
359 * This delay includes the delay of the last cycle, so
360 * what we have to do is sleep until it is 0. As for the
361 * advance/delay we gain here, we try to fix it next time.
362 */
363 init_delay_params(&sc, cpu);
364
bellard7d132992003-03-06 23:23:54 +0000365 /* prepare setjmp context for exception handling */
bellard3fb2ded2003-06-24 13:22:59 +0000366 for(;;) {
Andreas Färber6f03bef2013-08-26 06:22:03 +0200367 if (sigsetjmp(cpu->jmp_env, 0) == 0) {
bellard3fb2ded2003-06-24 13:22:59 +0000368 /* if an exception is pending, we execute it here */
Andreas Färber27103422013-08-26 08:31:06 +0200369 if (cpu->exception_index >= 0) {
370 if (cpu->exception_index >= EXCP_INTERRUPT) {
bellard3fb2ded2003-06-24 13:22:59 +0000371 /* exit request from the cpu execution loop */
Andreas Färber27103422013-08-26 08:31:06 +0200372 ret = cpu->exception_index;
Jan Kiszka1009d2e2011-03-15 12:26:13 +0100373 if (ret == EXCP_DEBUG) {
374 cpu_handle_debug_exception(env);
375 }
bellard3fb2ded2003-06-24 13:22:59 +0000376 break;
aurel3272d239e2009-01-14 19:40:27 +0000377 } else {
378#if defined(CONFIG_USER_ONLY)
bellard3fb2ded2003-06-24 13:22:59 +0000379 /* if user mode only, we simulate a fake exception
ths9f083492006-12-07 18:28:42 +0000380 which will be handled outside the cpu execution
bellard3fb2ded2003-06-24 13:22:59 +0000381 loop */
bellard83479e72003-06-25 16:12:37 +0000382#if defined(TARGET_I386)
Andreas Färber97a8ea52013-02-02 10:57:51 +0100383 cc->do_interrupt(cpu);
bellard83479e72003-06-25 16:12:37 +0000384#endif
Andreas Färber27103422013-08-26 08:31:06 +0200385 ret = cpu->exception_index;
bellard3fb2ded2003-06-24 13:22:59 +0000386 break;
aurel3272d239e2009-01-14 19:40:27 +0000387#else
Andreas Färber97a8ea52013-02-02 10:57:51 +0100388 cc->do_interrupt(cpu);
Andreas Färber27103422013-08-26 08:31:06 +0200389 cpu->exception_index = -1;
aurel3272d239e2009-01-14 19:40:27 +0000390#endif
bellard3fb2ded2003-06-24 13:22:59 +0000391 }
ths5fafdf22007-09-16 21:08:06 +0000392 }
bellard9df217a2005-02-10 22:05:51 +0000393
blueswir1b5fc09a2008-05-04 06:38:18 +0000394 next_tb = 0; /* force lookup of first TB */
bellard3fb2ded2003-06-24 13:22:59 +0000395 for(;;) {
Andreas Färber259186a2013-01-17 18:51:17 +0100396 interrupt_request = cpu->interrupt_request;
malce1638bd2008-11-06 18:54:46 +0000397 if (unlikely(interrupt_request)) {
Andreas Färbered2803d2013-06-21 20:20:45 +0200398 if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
malce1638bd2008-11-06 18:54:46 +0000399 /* Mask out external interrupts for this step. */
Richard Henderson3125f762011-05-04 13:34:25 -0700400 interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
malce1638bd2008-11-06 18:54:46 +0000401 }
pbrook6658ffb2007-03-16 23:58:11 +0000402 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
Andreas Färber259186a2013-01-17 18:51:17 +0100403 cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
Andreas Färber27103422013-08-26 08:31:06 +0200404 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +0200405 cpu_loop_exit(cpu);
pbrook6658ffb2007-03-16 23:58:11 +0000406 }
balroga90b7312007-05-01 01:28:01 +0000407#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
Edgar E. Iglesiasb779e292009-05-20 21:31:33 +0200408 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
Bastian Koppelmann48e06fe2014-09-01 12:59:46 +0100409 defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || \
410 defined(TARGET_UNICORE32) || defined(TARGET_TRICORE)
balroga90b7312007-05-01 01:28:01 +0000411 if (interrupt_request & CPU_INTERRUPT_HALT) {
Andreas Färber259186a2013-01-17 18:51:17 +0100412 cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
413 cpu->halted = 1;
Andreas Färber27103422013-08-26 08:31:06 +0200414 cpu->exception_index = EXCP_HLT;
Andreas Färber5638d182013-08-27 17:52:12 +0200415 cpu_loop_exit(cpu);
balroga90b7312007-05-01 01:28:01 +0000416 }
417#endif
bellard68a79312003-06-30 13:12:32 +0000418#if defined(TARGET_I386)
Paolo Bonzini4a92a552013-03-05 15:35:17 +0100419 if (interrupt_request & CPU_INTERRUPT_INIT) {
420 cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
421 do_cpu_init(x86_cpu);
422 cpu->exception_index = EXCP_HALTED;
423 cpu_loop_exit(cpu);
424 }
425#else
426 if (interrupt_request & CPU_INTERRUPT_RESET) {
427 cpu_reset(cpu);
428 }
429#endif
430#if defined(TARGET_I386)
Jan Kiszka5d62c432012-07-09 16:42:32 +0200431#if !defined(CONFIG_USER_ONLY)
432 if (interrupt_request & CPU_INTERRUPT_POLL) {
Andreas Färber259186a2013-01-17 18:51:17 +0100433 cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
Andreas Färber693fa552013-12-24 03:18:12 +0100434 apic_poll_irq(x86_cpu->apic_state);
Jan Kiszka5d62c432012-07-09 16:42:32 +0200435 }
436#endif
Paolo Bonzini4a92a552013-03-05 15:35:17 +0100437 if (interrupt_request & CPU_INTERRUPT_SIPI) {
Andreas Färber693fa552013-12-24 03:18:12 +0100438 do_cpu_sipi(x86_cpu);
Gleb Natapovb09ea7d2009-06-17 23:26:59 +0300439 } else if (env->hflags2 & HF2_GIF_MASK) {
bellarddb620f42008-06-04 17:02:19 +0000440 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
441 !(env->hflags & HF_SMM_MASK)) {
Blue Swirl77b2bc22012-04-28 19:35:10 +0000442 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
443 0);
Andreas Färber259186a2013-01-17 18:51:17 +0100444 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
Andreas Färber693fa552013-12-24 03:18:12 +0100445 do_smm_enter(x86_cpu);
bellarddb620f42008-06-04 17:02:19 +0000446 next_tb = 0;
447 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
448 !(env->hflags2 & HF2_NMI_MASK)) {
Andreas Färber259186a2013-01-17 18:51:17 +0100449 cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
bellarddb620f42008-06-04 17:02:19 +0000450 env->hflags2 |= HF2_NMI_MASK;
Blue Swirle694d4e2011-05-16 19:38:48 +0000451 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
bellarddb620f42008-06-04 17:02:19 +0000452 next_tb = 0;
陳韋任e965fc32012-02-06 14:02:55 +0800453 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
Andreas Färber259186a2013-01-17 18:51:17 +0100454 cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
Blue Swirle694d4e2011-05-16 19:38:48 +0000455 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
Huang Ying79c4f6b2009-06-23 10:05:14 +0800456 next_tb = 0;
bellarddb620f42008-06-04 17:02:19 +0000457 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
458 (((env->hflags2 & HF2_VINTR_MASK) &&
459 (env->hflags2 & HF2_HIF_MASK)) ||
460 (!(env->hflags2 & HF2_VINTR_MASK) &&
461 (env->eflags & IF_MASK &&
462 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
463 int intno;
Blue Swirl77b2bc22012-04-28 19:35:10 +0000464 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
465 0);
Andreas Färber259186a2013-01-17 18:51:17 +0100466 cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
467 CPU_INTERRUPT_VIRQ);
bellarddb620f42008-06-04 17:02:19 +0000468 intno = cpu_get_pic_interrupt(env);
malc4f213872012-08-27 18:33:12 +0400469 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
470 do_interrupt_x86_hardirq(env, intno, 1);
471 /* ensure that no TB jump will be modified as
472 the program flow was changed */
473 next_tb = 0;
ths0573fbf2007-09-23 15:28:04 +0000474#if !defined(CONFIG_USER_ONLY)
bellarddb620f42008-06-04 17:02:19 +0000475 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
476 (env->eflags & IF_MASK) &&
477 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
478 int intno;
479 /* FIXME: this should respect TPR */
Blue Swirl77b2bc22012-04-28 19:35:10 +0000480 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
481 0);
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +0100482 intno = ldl_phys(cpu->as,
483 env->vm_vmcb
484 + offsetof(struct vmcb,
485 control.int_vector));
aliguori93fcfe32009-01-15 22:34:14 +0000486 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
Blue Swirle694d4e2011-05-16 19:38:48 +0000487 do_interrupt_x86_hardirq(env, intno, 1);
Andreas Färber259186a2013-01-17 18:51:17 +0100488 cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
bellarddb620f42008-06-04 17:02:19 +0000489 next_tb = 0;
ths0573fbf2007-09-23 15:28:04 +0000490#endif
bellarddb620f42008-06-04 17:02:19 +0000491 }
bellard68a79312003-06-30 13:12:32 +0000492 }
bellardce097762004-01-04 23:53:18 +0000493#elif defined(TARGET_PPC)
j_mayer47103572007-03-30 09:38:04 +0000494 if (interrupt_request & CPU_INTERRUPT_HARD) {
j_mayere9df0142007-04-09 22:45:36 +0000495 ppc_hw_interrupt(env);
Andreas Färber259186a2013-01-17 18:51:17 +0100496 if (env->pending_interrupts == 0) {
497 cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
498 }
blueswir1b5fc09a2008-05-04 06:38:18 +0000499 next_tb = 0;
bellardce097762004-01-04 23:53:18 +0000500 }
Michael Walle81ea0e12011-02-17 23:45:02 +0100501#elif defined(TARGET_LM32)
502 if ((interrupt_request & CPU_INTERRUPT_HARD)
503 && (env->ie & IE_IE)) {
Andreas Färber27103422013-08-26 08:31:06 +0200504 cpu->exception_index = EXCP_IRQ;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100505 cc->do_interrupt(cpu);
Michael Walle81ea0e12011-02-17 23:45:02 +0100506 next_tb = 0;
507 }
Edgar E. Iglesiasb779e292009-05-20 21:31:33 +0200508#elif defined(TARGET_MICROBLAZE)
509 if ((interrupt_request & CPU_INTERRUPT_HARD)
510 && (env->sregs[SR_MSR] & MSR_IE)
511 && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
512 && !(env->iflags & (D_FLAG | IMM_FLAG))) {
Andreas Färber27103422013-08-26 08:31:06 +0200513 cpu->exception_index = EXCP_IRQ;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100514 cc->do_interrupt(cpu);
Edgar E. Iglesiasb779e292009-05-20 21:31:33 +0200515 next_tb = 0;
516 }
bellard6af0bf92005-07-02 14:58:51 +0000517#elif defined(TARGET_MIPS)
518 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
Aurelien Jarno4cdc1cd2010-12-25 22:56:32 +0100519 cpu_mips_hw_interrupts_pending(env)) {
bellard6af0bf92005-07-02 14:58:51 +0000520 /* Raise it */
Andreas Färber27103422013-08-26 08:31:06 +0200521 cpu->exception_index = EXCP_EXT_INTERRUPT;
bellard6af0bf92005-07-02 14:58:51 +0000522 env->error_code = 0;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100523 cc->do_interrupt(cpu);
blueswir1b5fc09a2008-05-04 06:38:18 +0000524 next_tb = 0;
bellard6af0bf92005-07-02 14:58:51 +0000525 }
Bastian Koppelmann48e06fe2014-09-01 12:59:46 +0100526#elif defined(TARGET_TRICORE)
527 if ((interrupt_request & CPU_INTERRUPT_HARD)) {
528 cc->do_interrupt(cpu);
529 next_tb = 0;
530 }
531
Jia Liub6a71ef2012-07-20 15:50:41 +0800532#elif defined(TARGET_OPENRISC)
533 {
534 int idx = -1;
535 if ((interrupt_request & CPU_INTERRUPT_HARD)
536 && (env->sr & SR_IEE)) {
537 idx = EXCP_INT;
538 }
539 if ((interrupt_request & CPU_INTERRUPT_TIMER)
540 && (env->sr & SR_TEE)) {
541 idx = EXCP_TICK;
542 }
543 if (idx >= 0) {
Andreas Färber27103422013-08-26 08:31:06 +0200544 cpu->exception_index = idx;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100545 cc->do_interrupt(cpu);
Jia Liub6a71ef2012-07-20 15:50:41 +0800546 next_tb = 0;
547 }
548 }
bellarde95c8d52004-09-30 22:22:08 +0000549#elif defined(TARGET_SPARC)
Igor V. Kovalenkod532b262010-01-07 23:28:31 +0300550 if (interrupt_request & CPU_INTERRUPT_HARD) {
551 if (cpu_interrupts_enabled(env) &&
552 env->interrupt_index > 0) {
553 int pil = env->interrupt_index & 0xf;
554 int type = env->interrupt_index & 0xf0;
bellard66321a12005-04-06 20:47:48 +0000555
Igor V. Kovalenkod532b262010-01-07 23:28:31 +0300556 if (((type == TT_EXTINT) &&
557 cpu_pil_allowed(env, pil)) ||
558 type != TT_EXTINT) {
Andreas Färber27103422013-08-26 08:31:06 +0200559 cpu->exception_index = env->interrupt_index;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100560 cc->do_interrupt(cpu);
Igor V. Kovalenkod532b262010-01-07 23:28:31 +0300561 next_tb = 0;
562 }
563 }
陳韋任e965fc32012-02-06 14:02:55 +0800564 }
bellardb5ff1b32005-11-26 10:38:39 +0000565#elif defined(TARGET_ARM)
566 if (interrupt_request & CPU_INTERRUPT_FIQ
Peter Maydell4cc35612014-02-26 17:20:06 +0000567 && !(env->daif & PSTATE_F)) {
Andreas Färber27103422013-08-26 08:31:06 +0200568 cpu->exception_index = EXCP_FIQ;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100569 cc->do_interrupt(cpu);
blueswir1b5fc09a2008-05-04 06:38:18 +0000570 next_tb = 0;
bellardb5ff1b32005-11-26 10:38:39 +0000571 }
pbrook9ee6e8b2007-11-11 00:04:49 +0000572 /* ARMv7-M interrupt return works by loading a magic value
573 into the PC. On real hardware the load causes the
574 return to occur. The qemu implementation performs the
575 jump normally, then does the exception return when the
576 CPU tries to execute code at the magic address.
577 This will cause the magic PC value to be pushed to
Stefan Weila1c72732011-04-28 17:20:38 +0200578 the stack if an interrupt occurred at the wrong time.
pbrook9ee6e8b2007-11-11 00:04:49 +0000579 We avoid this by disabling interrupts when
580 pc contains a magic address. */
bellardb5ff1b32005-11-26 10:38:39 +0000581 if (interrupt_request & CPU_INTERRUPT_HARD
David Hooverc3c8d6b2014-09-12 14:06:47 +0100582 && !(env->daif & PSTATE_I)
583 && (!IS_M(env) || env->regs[15] < 0xfffffff0)) {
Andreas Färber27103422013-08-26 08:31:06 +0200584 cpu->exception_index = EXCP_IRQ;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100585 cc->do_interrupt(cpu);
blueswir1b5fc09a2008-05-04 06:38:18 +0000586 next_tb = 0;
bellardb5ff1b32005-11-26 10:38:39 +0000587 }
Guan Xuetaod2fbca92011-04-12 16:27:03 +0800588#elif defined(TARGET_UNICORE32)
589 if (interrupt_request & CPU_INTERRUPT_HARD
590 && !(env->uncached_asr & ASR_I)) {
Andreas Färber27103422013-08-26 08:31:06 +0200591 cpu->exception_index = UC32_EXCP_INTR;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100592 cc->do_interrupt(cpu);
Guan Xuetaod2fbca92011-04-12 16:27:03 +0800593 next_tb = 0;
594 }
bellardfdf9b3e2006-04-27 21:07:38 +0000595#elif defined(TARGET_SH4)
thse96e2042007-12-02 06:18:24 +0000596 if (interrupt_request & CPU_INTERRUPT_HARD) {
Andreas Färber97a8ea52013-02-02 10:57:51 +0100597 cc->do_interrupt(cpu);
blueswir1b5fc09a2008-05-04 06:38:18 +0000598 next_tb = 0;
thse96e2042007-12-02 06:18:24 +0000599 }
j_mayereddf68a2007-04-05 07:22:49 +0000600#elif defined(TARGET_ALPHA)
Richard Henderson6a80e082011-04-18 15:09:09 -0700601 {
602 int idx = -1;
603 /* ??? This hard-codes the OSF/1 interrupt levels. */
陳韋任e965fc32012-02-06 14:02:55 +0800604 switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
Richard Henderson6a80e082011-04-18 15:09:09 -0700605 case 0 ... 3:
606 if (interrupt_request & CPU_INTERRUPT_HARD) {
607 idx = EXCP_DEV_INTERRUPT;
608 }
609 /* FALLTHRU */
610 case 4:
611 if (interrupt_request & CPU_INTERRUPT_TIMER) {
612 idx = EXCP_CLK_INTERRUPT;
613 }
614 /* FALLTHRU */
615 case 5:
616 if (interrupt_request & CPU_INTERRUPT_SMP) {
617 idx = EXCP_SMP_INTERRUPT;
618 }
619 /* FALLTHRU */
620 case 6:
621 if (interrupt_request & CPU_INTERRUPT_MCHK) {
622 idx = EXCP_MCHK;
623 }
624 }
625 if (idx >= 0) {
Andreas Färber27103422013-08-26 08:31:06 +0200626 cpu->exception_index = idx;
Richard Henderson6a80e082011-04-18 15:09:09 -0700627 env->error_code = 0;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100628 cc->do_interrupt(cpu);
Richard Henderson6a80e082011-04-18 15:09:09 -0700629 next_tb = 0;
630 }
j_mayereddf68a2007-04-05 07:22:49 +0000631 }
thsf1ccf902007-10-08 13:16:14 +0000632#elif defined(TARGET_CRIS)
edgar_igl1b1a38b2008-06-09 23:18:06 +0000633 if (interrupt_request & CPU_INTERRUPT_HARD
Edgar E. Iglesiasfb9fb692010-02-15 11:17:33 +0100634 && (env->pregs[PR_CCS] & I_FLAG)
635 && !env->locked_irq) {
Andreas Färber27103422013-08-26 08:31:06 +0200636 cpu->exception_index = EXCP_IRQ;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100637 cc->do_interrupt(cpu);
edgar_igl1b1a38b2008-06-09 23:18:06 +0000638 next_tb = 0;
639 }
Lars Persson82193142012-06-14 16:23:55 +0200640 if (interrupt_request & CPU_INTERRUPT_NMI) {
641 unsigned int m_flag_archval;
642 if (env->pregs[PR_VR] < 32) {
643 m_flag_archval = M_FLAG_V10;
644 } else {
645 m_flag_archval = M_FLAG_V32;
646 }
647 if ((env->pregs[PR_CCS] & m_flag_archval)) {
Andreas Färber27103422013-08-26 08:31:06 +0200648 cpu->exception_index = EXCP_NMI;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100649 cc->do_interrupt(cpu);
Lars Persson82193142012-06-14 16:23:55 +0200650 next_tb = 0;
651 }
thsf1ccf902007-10-08 13:16:14 +0000652 }
pbrook06338792007-05-23 19:58:11 +0000653#elif defined(TARGET_M68K)
654 if (interrupt_request & CPU_INTERRUPT_HARD
655 && ((env->sr & SR_I) >> SR_I_SHIFT)
656 < env->pending_level) {
657 /* Real hardware gets the interrupt vector via an
658 IACK cycle at this point. Current emulated
659 hardware doesn't rely on this, so we
660 provide/save the vector when the interrupt is
661 first signalled. */
Andreas Färber27103422013-08-26 08:31:06 +0200662 cpu->exception_index = env->pending_vector;
Blue Swirl3c688822011-05-21 07:55:24 +0000663 do_interrupt_m68k_hardirq(env);
blueswir1b5fc09a2008-05-04 06:38:18 +0000664 next_tb = 0;
pbrook06338792007-05-23 19:58:11 +0000665 }
Alexander Graf3110e292011-04-15 17:32:48 +0200666#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
667 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
668 (env->psw.mask & PSW_MASK_EXT)) {
Andreas Färber97a8ea52013-02-02 10:57:51 +0100669 cc->do_interrupt(cpu);
Alexander Graf3110e292011-04-15 17:32:48 +0200670 next_tb = 0;
671 }
Max Filippov40643d72011-09-06 03:55:41 +0400672#elif defined(TARGET_XTENSA)
673 if (interrupt_request & CPU_INTERRUPT_HARD) {
Andreas Färber27103422013-08-26 08:31:06 +0200674 cpu->exception_index = EXC_IRQ;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100675 cc->do_interrupt(cpu);
Max Filippov40643d72011-09-06 03:55:41 +0400676 next_tb = 0;
677 }
bellard68a79312003-06-30 13:12:32 +0000678#endif
Stefan Weilff2712b2011-04-28 17:20:35 +0200679 /* Don't use the cached interrupt_request value,
bellard9d050952006-05-22 22:03:52 +0000680 do_interrupt may have updated the EXITTB flag. */
Andreas Färber259186a2013-01-17 18:51:17 +0100681 if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
682 cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
bellardbf3e8bf2004-02-16 21:58:54 +0000683 /* ensure that no TB jump will be modified as
684 the program flow was changed */
blueswir1b5fc09a2008-05-04 06:38:18 +0000685 next_tb = 0;
bellardbf3e8bf2004-02-16 21:58:54 +0000686 }
aurel32be214e62009-03-06 21:48:00 +0000687 }
Andreas Färberfcd7d002012-12-17 08:02:44 +0100688 if (unlikely(cpu->exit_request)) {
689 cpu->exit_request = 0;
Andreas Färber27103422013-08-26 08:31:06 +0200690 cpu->exception_index = EXCP_INTERRUPT;
Andreas Färber5638d182013-08-27 17:52:12 +0200691 cpu_loop_exit(cpu);
bellard3fb2ded2003-06-24 13:22:59 +0000692 }
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +0700693 spin_lock(&tcg_ctx.tb_ctx.tb_lock);
Peter Maydellbae2c272014-04-04 17:42:56 +0100694 have_tb_lock = true;
Blue Swirlcea5f9a2011-05-15 16:03:25 +0000695 tb = tb_find_fast(env);
pbrookd5975362008-06-07 20:50:51 +0000696 /* Note: we do it here to avoid a gcc bug on Mac OS X when
697 doing it in tb_find_slow */
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +0700698 if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
pbrookd5975362008-06-07 20:50:51 +0000699 /* as some TB could have been invalidated because
700 of memory exceptions while generating the code, we
701 must recompute the hash index here */
702 next_tb = 0;
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +0700703 tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
pbrookd5975362008-06-07 20:50:51 +0000704 }
Peter Maydellc30d1ae2013-04-11 21:21:46 +0100705 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
706 qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
707 tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
708 }
bellard8a40a182005-11-20 10:35:40 +0000709 /* see if we can patch the calling TB. When the TB
710 spans two pages, we cannot safely do a direct
711 jump. */
Paolo Bonzini040f2fb2010-01-15 08:56:36 +0100712 if (next_tb != 0 && tb->page_addr[1] == -1) {
Peter Maydell09800112013-02-22 18:10:00 +0000713 tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
714 next_tb & TB_EXIT_MASK, tb);
bellard3fb2ded2003-06-24 13:22:59 +0000715 }
Peter Maydellbae2c272014-04-04 17:42:56 +0100716 have_tb_lock = false;
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +0700717 spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
malc55e8b852008-11-04 14:18:13 +0000718
719 /* cpu_interrupt might be called while translating the
720 TB, but before it is linked into a potentially
721 infinite loop and becomes env->current_tb. Avoid
722 starting execution if there is a pending interrupt. */
Andreas Färberd77953b2013-01-16 19:29:31 +0100723 cpu->current_tb = tb;
Jan Kiszkab0052d12010-06-25 16:56:50 +0200724 barrier();
Andreas Färberfcd7d002012-12-17 08:02:44 +0100725 if (likely(!cpu->exit_request)) {
Alex Bennée6db8b532014-08-01 17:08:57 +0100726 trace_exec_tb(tb, tb->pc);
pbrook2e70f6e2008-06-29 01:03:05 +0000727 tc_ptr = tb->tc_ptr;
陳韋任e965fc32012-02-06 14:02:55 +0800728 /* execute the generated code */
Peter Maydell77211372013-02-22 18:10:02 +0000729 next_tb = cpu_tb_exec(cpu, tc_ptr);
Peter Maydell378df4b2013-02-22 18:10:03 +0000730 switch (next_tb & TB_EXIT_MASK) {
731 case TB_EXIT_REQUESTED:
732 /* Something asked us to stop executing
733 * chained TBs; just continue round the main
734 * loop. Whatever requested the exit will also
735 * have set something else (eg exit_request or
736 * interrupt_request) which we will handle
737 * next time around the loop.
738 */
739 tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
740 next_tb = 0;
741 break;
742 case TB_EXIT_ICOUNT_EXPIRED:
743 {
thsbf20dc02008-06-30 17:22:19 +0000744 /* Instruction counter expired. */
pbrook2e70f6e2008-06-29 01:03:05 +0000745 int insns_left;
Peter Maydell09800112013-02-22 18:10:00 +0000746 tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
Andreas Färber28ecfd72013-08-26 05:51:49 +0200747 insns_left = cpu->icount_decr.u32;
Andreas Färberefee7342013-08-26 05:39:29 +0200748 if (cpu->icount_extra && insns_left >= 0) {
pbrook2e70f6e2008-06-29 01:03:05 +0000749 /* Refill decrementer and continue execution. */
Andreas Färberefee7342013-08-26 05:39:29 +0200750 cpu->icount_extra += insns_left;
751 if (cpu->icount_extra > 0xffff) {
pbrook2e70f6e2008-06-29 01:03:05 +0000752 insns_left = 0xffff;
753 } else {
Andreas Färberefee7342013-08-26 05:39:29 +0200754 insns_left = cpu->icount_extra;
pbrook2e70f6e2008-06-29 01:03:05 +0000755 }
Andreas Färberefee7342013-08-26 05:39:29 +0200756 cpu->icount_extra -= insns_left;
Andreas Färber28ecfd72013-08-26 05:51:49 +0200757 cpu->icount_decr.u16.low = insns_left;
pbrook2e70f6e2008-06-29 01:03:05 +0000758 } else {
759 if (insns_left > 0) {
760 /* Execute remaining instructions. */
Blue Swirlcea5f9a2011-05-15 16:03:25 +0000761 cpu_exec_nocache(env, insns_left, tb);
Sebastian Tanasec2aa5f82014-07-25 11:56:31 +0200762 align_clocks(&sc, cpu);
pbrook2e70f6e2008-06-29 01:03:05 +0000763 }
Andreas Färber27103422013-08-26 08:31:06 +0200764 cpu->exception_index = EXCP_INTERRUPT;
pbrook2e70f6e2008-06-29 01:03:05 +0000765 next_tb = 0;
Andreas Färber5638d182013-08-27 17:52:12 +0200766 cpu_loop_exit(cpu);
pbrook2e70f6e2008-06-29 01:03:05 +0000767 }
Peter Maydell378df4b2013-02-22 18:10:03 +0000768 break;
769 }
770 default:
771 break;
pbrook2e70f6e2008-06-29 01:03:05 +0000772 }
773 }
Andreas Färberd77953b2013-01-16 19:29:31 +0100774 cpu->current_tb = NULL;
Sebastian Tanasec2aa5f82014-07-25 11:56:31 +0200775 /* Try to align the host and virtual clocks
776 if the guest is in advance */
777 align_clocks(&sc, cpu);
bellard4cbf74b2003-08-10 21:48:43 +0000778 /* reset soft MMU for next block (it can currently
779 only be set by a memory fault) */
ths50a518e2007-06-03 18:52:15 +0000780 } /* for(;;) */
Jan Kiszka0d101932011-07-02 09:50:51 +0200781 } else {
782 /* Reload env after longjmp - the compiler may have smashed all
783 * local variables as longjmp is marked 'noreturn'. */
Andreas Färber4917cf42013-05-27 05:17:50 +0200784 cpu = current_cpu;
785 env = cpu->env_ptr;
Juergen Lock6c78f292013-10-03 16:09:37 +0200786#if !(defined(CONFIG_USER_ONLY) && \
787 (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
788 cc = CPU_GET_CLASS(cpu);
789#endif
Andreas Färber693fa552013-12-24 03:18:12 +0100790#ifdef TARGET_I386
791 x86_cpu = X86_CPU(cpu);
792#endif
Peter Maydellbae2c272014-04-04 17:42:56 +0100793 if (have_tb_lock) {
794 spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
795 have_tb_lock = false;
796 }
bellard7d132992003-03-06 23:23:54 +0000797 }
bellard3fb2ded2003-06-24 13:22:59 +0000798 } /* for(;;) */
799
Richard Hendersoncffe7b32014-09-13 09:45:12 -0700800 cc->cpu_exec_exit(cpu);
pbrook1057eaa2007-02-04 13:37:44 +0000801
Andreas Färber4917cf42013-05-27 05:17:50 +0200802 /* fail safe : never use current_cpu outside cpu_exec() */
803 current_cpu = NULL;
bellard7d132992003-03-06 23:23:54 +0000804 return ret;
805}