blob: 7b5d2e21d0a45c48f3f1ad4a7c1ff7dc2a144020 [file] [log] [blame]
bellard7d132992003-03-06 23:23:54 +00001/*
陳韋任e965fc32012-02-06 14:02:55 +08002 * emulator main execution loop
ths5fafdf22007-09-16 21:08:06 +00003 *
bellard66321a12005-04-06 20:47:48 +00004 * Copyright (c) 2003-2005 Fabrice Bellard
bellard7d132992003-03-06 23:23:54 +00005 *
bellard3ef693a2003-03-23 20:17:16 +00006 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
bellard7d132992003-03-06 23:23:54 +000010 *
bellard3ef693a2003-03-23 20:17:16 +000011 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
bellard7d132992003-03-06 23:23:54 +000015 *
bellard3ef693a2003-03-23 20:17:16 +000016 * You should have received a copy of the GNU Lesser General Public
Blue Swirl8167ee82009-07-16 20:47:01 +000017 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
bellard7d132992003-03-06 23:23:54 +000018 */
bellarde4533c72003-06-15 19:51:39 +000019#include "config.h"
Blue Swirlcea5f9a2011-05-15 16:03:25 +000020#include "cpu.h"
Alex Bennée6db8b532014-08-01 17:08:57 +010021#include "trace.h"
Paolo Bonzini76cad712012-10-24 11:12:21 +020022#include "disas/disas.h"
bellard7cb69ca2008-05-10 10:55:51 +000023#include "tcg.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010024#include "qemu/atomic.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010025#include "sysemu/qtest.h"
Sebastian Tanasec2aa5f82014-07-25 11:56:31 +020026#include "qemu/timer.h"
27
28/* -icount align implementation. */
29
/* Per-CPU state for the -icount align option: tracks how far the guest
 * instruction-count clock has drifted from the host realtime clock.
 */
typedef struct SyncClocks {
    int64_t diff_clk;        /* guest clock minus host clock, in ns */
    int64_t last_cpu_icount; /* icount value seen at the last alignment */
    int64_t realtime_clock;  /* host realtime clock at init time, in ns */
} SyncClocks;
35
36#if !defined(CONFIG_USER_ONLY)
37/* Allow the guest to have a max 3ms advance.
38 * The difference between the 2 clocks could therefore
39 * oscillate around 0.
40 */
41#define VM_CLOCK_ADVANCE 3000000
Sebastian Tanase7f7bc142014-07-25 11:56:32 +020042#define THRESHOLD_REDUCE 1.5
43#define MAX_DELAY_PRINT_RATE 2000000000LL
44#define MAX_NB_PRINTS 100
Sebastian Tanasec2aa5f82014-07-25 11:56:31 +020045
/* Re-synchronise the guest and host clocks.  Accumulates the time the
 * guest advanced since the last call into sc->diff_clk and, if the guest
 * is ahead of the host by more than VM_CLOCK_ADVANCE, sleeps the host
 * thread to let real time catch up.  No-op unless -icount align is on.
 */
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    /* Instructions still pending for this CPU; converting the delta of
     * executed instructions to ns gives the guest-time advance. */
    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            /* Interrupted: only credit the time actually slept. */
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        /* Win32 Sleep() has only ms granularity; assume the full delay
         * elapsed and reset the difference. */
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
75
Sebastian Tanase7f7bc142014-07-25 11:56:32 +020076static void print_delay(const SyncClocks *sc)
77{
78 static float threshold_delay;
79 static int64_t last_realtime_clock;
80 static int nb_prints;
81
82 if (icount_align_option &&
83 sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
84 nb_prints < MAX_NB_PRINTS) {
85 if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
86 (-sc->diff_clk / (float)1000000000LL <
87 (threshold_delay - THRESHOLD_REDUCE))) {
88 threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
89 printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
90 threshold_delay - 1,
91 threshold_delay);
92 nb_prints++;
93 last_realtime_clock = sc->realtime_clock;
94 }
95 }
96}
97
/* Initialise the -icount align state for @cpu before entering the main
 * execution loop: snapshot the host realtime clock, compute the initial
 * guest/host clock difference, and update the global max_delay /
 * max_advance statistics.  No-op unless -icount align is enabled.
 */
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    /* Guest virtual clock relative to host realtime, plus the offset
     * reported by cpu_get_clock_offset(). */
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    /* Record the worst lag/lead observed so far in the globals. */
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100) */
    print_delay(sc);
}
120#else
/* -icount align is system-mode only; user-mode stub does nothing. */
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}
124
/* -icount align is system-mode only; user-mode stub does nothing. */
static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
128#endif /* CONFIG USER ONLY */
bellard7d132992003-03-06 23:23:54 +0000129
/* Abandon execution of the current translation block and return to the
 * main loop via the longjmp buffer armed by cpu_exec()'s sigsetjmp.
 */
void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}
thsbfed01f2007-06-03 17:44:37 +0000135
bellardfbf9eeb2004-04-25 21:21:33 +0000136/* exit the current TB from a signal handler. The host registers are
137 restored in a state compatible with the CPU emulator
138 */
#if defined(CONFIG_SOFTMMU)
/* Re-enter the main loop from a signal handler: clear any pending
 * exception and longjmp back to cpu_exec()'s sigsetjmp point.
 * @puc is the signal ucontext; it is unused here.
 */
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif
bellardfbf9eeb2004-04-25 21:21:33 +0000148
/* Execute a TB, and fix up the CPU state afterwards if necessary.
 *
 * Returns the value produced by tcg_qemu_tb_exec(): the address of the
 * last executed TB with the exit reason encoded in the low bits
 * (TB_EXIT_MASK).  When execution did not actually start (exit index
 * above TB_EXIT_IDX1) the guest PC is restored to the start of the TB.
 */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    /* Optionally dump CPU state before entering the TB. */
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping. */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    /* Run the generated host code for this TB. */
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt. We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
197
pbrook2e70f6e2008-06-29 01:03:05 +0000198/* Execute the code without caching the generated code. An interpreter
199 could be used if available. */
Andreas Färber9349b4f2012-03-14 01:38:32 +0100200static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
Blue Swirlcea5f9a2011-05-15 16:03:25 +0000201 TranslationBlock *orig_tb)
pbrook2e70f6e2008-06-29 01:03:05 +0000202{
Andreas Färberd77953b2013-01-16 19:29:31 +0100203 CPUState *cpu = ENV_GET_CPU(env);
pbrook2e70f6e2008-06-29 01:03:05 +0000204 TranslationBlock *tb;
205
206 /* Should never happen.
207 We only end up here when an existing TB is too long. */
208 if (max_cycles > CF_COUNT_MASK)
209 max_cycles = CF_COUNT_MASK;
210
Andreas Färber648f0342013-09-01 17:43:17 +0200211 tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
pbrook2e70f6e2008-06-29 01:03:05 +0000212 max_cycles);
Andreas Färberd77953b2013-01-16 19:29:31 +0100213 cpu->current_tb = tb;
pbrook2e70f6e2008-06-29 01:03:05 +0000214 /* execute the generated code */
Alex Bennée6db8b532014-08-01 17:08:57 +0100215 trace_exec_tb_nocache(tb, tb->pc);
Peter Maydell77211372013-02-22 18:10:02 +0000216 cpu_tb_exec(cpu, tb->tc_ptr);
Andreas Färberd77953b2013-01-16 19:29:31 +0100217 cpu->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +0000218 tb_phys_invalidate(tb, -1);
219 tb_free(tb);
220}
221
Andreas Färber9349b4f2012-03-14 01:38:32 +0100222static TranslationBlock *tb_find_slow(CPUArchState *env,
Blue Swirlcea5f9a2011-05-15 16:03:25 +0000223 target_ulong pc,
bellard8a40a182005-11-20 10:35:40 +0000224 target_ulong cs_base,
j_mayerc0686882007-09-20 22:47:42 +0000225 uint64_t flags)
bellard8a40a182005-11-20 10:35:40 +0000226{
Andreas Färber8cd70432013-08-26 06:03:38 +0200227 CPUState *cpu = ENV_GET_CPU(env);
bellard8a40a182005-11-20 10:35:40 +0000228 TranslationBlock *tb, **ptb1;
bellard8a40a182005-11-20 10:35:40 +0000229 unsigned int h;
Blue Swirl337fc752011-09-04 11:06:22 +0000230 tb_page_addr_t phys_pc, phys_page1;
Paul Brook41c1b1c2010-03-12 16:54:58 +0000231 target_ulong virt_page2;
ths3b46e622007-09-17 08:09:54 +0000232
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +0700233 tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
ths3b46e622007-09-17 08:09:54 +0000234
bellard8a40a182005-11-20 10:35:40 +0000235 /* find translated block using physical mappings */
Paul Brook41c1b1c2010-03-12 16:54:58 +0000236 phys_pc = get_page_addr_code(env, pc);
bellard8a40a182005-11-20 10:35:40 +0000237 phys_page1 = phys_pc & TARGET_PAGE_MASK;
bellard8a40a182005-11-20 10:35:40 +0000238 h = tb_phys_hash_func(phys_pc);
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +0700239 ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
bellard8a40a182005-11-20 10:35:40 +0000240 for(;;) {
241 tb = *ptb1;
242 if (!tb)
243 goto not_found;
ths5fafdf22007-09-16 21:08:06 +0000244 if (tb->pc == pc &&
bellard8a40a182005-11-20 10:35:40 +0000245 tb->page_addr[0] == phys_page1 &&
ths5fafdf22007-09-16 21:08:06 +0000246 tb->cs_base == cs_base &&
bellard8a40a182005-11-20 10:35:40 +0000247 tb->flags == flags) {
248 /* check next page if needed */
249 if (tb->page_addr[1] != -1) {
Blue Swirl337fc752011-09-04 11:06:22 +0000250 tb_page_addr_t phys_page2;
251
ths5fafdf22007-09-16 21:08:06 +0000252 virt_page2 = (pc & TARGET_PAGE_MASK) +
bellard8a40a182005-11-20 10:35:40 +0000253 TARGET_PAGE_SIZE;
Paul Brook41c1b1c2010-03-12 16:54:58 +0000254 phys_page2 = get_page_addr_code(env, virt_page2);
bellard8a40a182005-11-20 10:35:40 +0000255 if (tb->page_addr[1] == phys_page2)
256 goto found;
257 } else {
258 goto found;
259 }
260 }
261 ptb1 = &tb->phys_hash_next;
262 }
263 not_found:
pbrook2e70f6e2008-06-29 01:03:05 +0000264 /* if no translated code available, then translate it now */
Andreas Färber648f0342013-09-01 17:43:17 +0200265 tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
ths3b46e622007-09-17 08:09:54 +0000266
bellard8a40a182005-11-20 10:35:40 +0000267 found:
Kirill Batuzov2c90fe22010-12-02 16:12:46 +0300268 /* Move the last found TB to the head of the list */
269 if (likely(*ptb1)) {
270 *ptb1 = tb->phys_hash_next;
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +0700271 tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
272 tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
Kirill Batuzov2c90fe22010-12-02 16:12:46 +0300273 }
bellard8a40a182005-11-20 10:35:40 +0000274 /* we add the TB in the virtual pc hash table */
Andreas Färber8cd70432013-08-26 06:03:38 +0200275 cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
bellard8a40a182005-11-20 10:35:40 +0000276 return tb;
277}
278
Andreas Färber9349b4f2012-03-14 01:38:32 +0100279static inline TranslationBlock *tb_find_fast(CPUArchState *env)
bellard8a40a182005-11-20 10:35:40 +0000280{
Andreas Färber8cd70432013-08-26 06:03:38 +0200281 CPUState *cpu = ENV_GET_CPU(env);
bellard8a40a182005-11-20 10:35:40 +0000282 TranslationBlock *tb;
283 target_ulong cs_base, pc;
aliguori6b917542008-11-18 19:46:41 +0000284 int flags;
bellard8a40a182005-11-20 10:35:40 +0000285
286 /* we record a subset of the CPU state. It will
287 always be the same before a given translated block
288 is executed. */
aliguori6b917542008-11-18 19:46:41 +0000289 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
Andreas Färber8cd70432013-08-26 06:03:38 +0200290 tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
ths551bd272008-07-03 17:57:36 +0000291 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
292 tb->flags != flags)) {
Blue Swirlcea5f9a2011-05-15 16:03:25 +0000293 tb = tb_find_slow(env, pc, cs_base, flags);
bellard8a40a182005-11-20 10:35:40 +0000294 }
295 return tb;
296}
297
/* Optional callback run whenever a debug exception is handled. */
static CPUDebugExcpHandler *debug_excp_handler;

/* Install @handler as the debug-exception callback; NULL disables it. */
void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}
304
Andreas Färber9349b4f2012-03-14 01:38:32 +0100305static void cpu_handle_debug_exception(CPUArchState *env)
Jan Kiszka1009d2e2011-03-15 12:26:13 +0100306{
Andreas Färberff4700b2013-08-26 18:23:18 +0200307 CPUState *cpu = ENV_GET_CPU(env);
Jan Kiszka1009d2e2011-03-15 12:26:13 +0100308 CPUWatchpoint *wp;
309
Andreas Färberff4700b2013-08-26 18:23:18 +0200310 if (!cpu->watchpoint_hit) {
311 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Jan Kiszka1009d2e2011-03-15 12:26:13 +0100312 wp->flags &= ~BP_WATCHPOINT_HIT;
313 }
314 }
315 if (debug_excp_handler) {
316 debug_excp_handler(env);
317 }
318}
319
bellard7d132992003-03-06 23:23:54 +0000320/* main execution loop */
321
Marcelo Tosatti1a28cac2010-05-04 09:45:20 -0300322volatile sig_atomic_t exit_request;
323
Andreas Färber9349b4f2012-03-14 01:38:32 +0100324int cpu_exec(CPUArchState *env)
bellard7d132992003-03-06 23:23:54 +0000325{
Andreas Färberc356a1b2012-05-04 19:39:23 +0200326 CPUState *cpu = ENV_GET_CPU(env);
Andreas Färber97a8ea52013-02-02 10:57:51 +0100327#if !(defined(CONFIG_USER_ONLY) && \
328 (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
329 CPUClass *cc = CPU_GET_CLASS(cpu);
330#endif
Andreas Färber693fa552013-12-24 03:18:12 +0100331#ifdef TARGET_I386
332 X86CPU *x86_cpu = X86_CPU(cpu);
333#endif
bellard8a40a182005-11-20 10:35:40 +0000334 int ret, interrupt_request;
bellard8a40a182005-11-20 10:35:40 +0000335 TranslationBlock *tb;
bellardc27004e2005-01-03 23:35:10 +0000336 uint8_t *tc_ptr;
Richard Henderson3e9bd632013-08-20 14:40:25 -0700337 uintptr_t next_tb;
Sebastian Tanasec2aa5f82014-07-25 11:56:31 +0200338 SyncClocks sc;
339
Peter Maydellbae2c272014-04-04 17:42:56 +0100340 /* This must be volatile so it is not trashed by longjmp() */
341 volatile bool have_tb_lock = false;
bellard8c6939c2003-06-09 15:28:00 +0000342
Andreas Färber259186a2013-01-17 18:51:17 +0100343 if (cpu->halted) {
Andreas Färber3993c6b2012-05-03 06:43:49 +0200344 if (!cpu_has_work(cpu)) {
Paolo Bonzinieda48c32011-03-12 17:43:56 +0100345 return EXCP_HALTED;
346 }
347
Andreas Färber259186a2013-01-17 18:51:17 +0100348 cpu->halted = 0;
Paolo Bonzinieda48c32011-03-12 17:43:56 +0100349 }
bellard5a1e3cf2005-11-23 21:02:53 +0000350
Andreas Färber4917cf42013-05-27 05:17:50 +0200351 current_cpu = cpu;
bellarde4533c72003-06-15 19:51:39 +0000352
Andreas Färber4917cf42013-05-27 05:17:50 +0200353 /* As long as current_cpu is null, up to the assignment just above,
Olivier Hainqueec9bd892013-04-09 18:06:54 +0200354 * requests by other threads to exit the execution loop are expected to
355 * be issued using the exit_request global. We must make sure that our
Andreas Färber4917cf42013-05-27 05:17:50 +0200356 * evaluation of the global value is performed past the current_cpu
Olivier Hainqueec9bd892013-04-09 18:06:54 +0200357 * value transition point, which requires a memory barrier as well as
358 * an instruction scheduling constraint on modern architectures. */
359 smp_mb();
360
Jan Kiszkac629a4b2010-06-25 16:56:52 +0200361 if (unlikely(exit_request)) {
Andreas Färberfcd7d002012-12-17 08:02:44 +0100362 cpu->exit_request = 1;
Marcelo Tosatti1a28cac2010-05-04 09:45:20 -0300363 }
364
thsecb644f2007-06-03 18:45:53 +0000365#if defined(TARGET_I386)
Jan Kiszka6792a572011-02-07 12:19:18 +0100366 /* put eflags in CPU temporary format */
367 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
liguang80cf2c82013-05-28 16:21:08 +0800368 env->df = 1 - (2 * ((env->eflags >> 10) & 1));
Jan Kiszka6792a572011-02-07 12:19:18 +0100369 CC_OP = CC_OP_EFLAGS;
370 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
bellard93ac68b2003-09-30 20:57:29 +0000371#elif defined(TARGET_SPARC)
pbrooke6e59062006-10-22 00:18:54 +0000372#elif defined(TARGET_M68K)
373 env->cc_op = CC_OP_FLAGS;
374 env->cc_dest = env->sr & 0xf;
375 env->cc_x = (env->sr >> 4) & 1;
thsecb644f2007-06-03 18:45:53 +0000376#elif defined(TARGET_ALPHA)
377#elif defined(TARGET_ARM)
Guan Xuetaod2fbca92011-04-12 16:27:03 +0800378#elif defined(TARGET_UNICORE32)
thsecb644f2007-06-03 18:45:53 +0000379#elif defined(TARGET_PPC)
Elie Richa4e85f822011-07-22 05:58:39 +0000380 env->reserve_addr = -1;
Michael Walle81ea0e12011-02-17 23:45:02 +0100381#elif defined(TARGET_LM32)
Edgar E. Iglesiasb779e292009-05-20 21:31:33 +0200382#elif defined(TARGET_MICROBLAZE)
bellard6af0bf92005-07-02 14:58:51 +0000383#elif defined(TARGET_MIPS)
Anthony Greend15a9c22013-03-18 15:49:25 -0400384#elif defined(TARGET_MOXIE)
Jia Liue67db062012-07-20 15:50:39 +0800385#elif defined(TARGET_OPENRISC)
bellardfdf9b3e2006-04-27 21:07:38 +0000386#elif defined(TARGET_SH4)
thsf1ccf902007-10-08 13:16:14 +0000387#elif defined(TARGET_CRIS)
Alexander Graf10ec5112009-12-05 12:44:21 +0100388#elif defined(TARGET_S390X)
Max Filippov23288262011-09-06 03:55:25 +0400389#elif defined(TARGET_XTENSA)
Bastian Koppelmann48e06fe2014-09-01 12:59:46 +0100390#elif defined(TARGET_TRICORE)
bellardfdf9b3e2006-04-27 21:07:38 +0000391 /* XXXXX */
bellarde4533c72003-06-15 19:51:39 +0000392#else
393#error unsupported target CPU
394#endif
Andreas Färber27103422013-08-26 08:31:06 +0200395 cpu->exception_index = -1;
bellard9d27abd2003-05-10 13:13:54 +0000396
Sebastian Tanasec2aa5f82014-07-25 11:56:31 +0200397 /* Calculate difference between guest clock and host clock.
398 * This delay includes the delay of the last cycle, so
399 * what we have to do is sleep until it is 0. As for the
400 * advance/delay we gain here, we try to fix it next time.
401 */
402 init_delay_params(&sc, cpu);
403
bellard7d132992003-03-06 23:23:54 +0000404 /* prepare setjmp context for exception handling */
bellard3fb2ded2003-06-24 13:22:59 +0000405 for(;;) {
Andreas Färber6f03bef2013-08-26 06:22:03 +0200406 if (sigsetjmp(cpu->jmp_env, 0) == 0) {
bellard3fb2ded2003-06-24 13:22:59 +0000407 /* if an exception is pending, we execute it here */
Andreas Färber27103422013-08-26 08:31:06 +0200408 if (cpu->exception_index >= 0) {
409 if (cpu->exception_index >= EXCP_INTERRUPT) {
bellard3fb2ded2003-06-24 13:22:59 +0000410 /* exit request from the cpu execution loop */
Andreas Färber27103422013-08-26 08:31:06 +0200411 ret = cpu->exception_index;
Jan Kiszka1009d2e2011-03-15 12:26:13 +0100412 if (ret == EXCP_DEBUG) {
413 cpu_handle_debug_exception(env);
414 }
bellard3fb2ded2003-06-24 13:22:59 +0000415 break;
aurel3272d239e2009-01-14 19:40:27 +0000416 } else {
417#if defined(CONFIG_USER_ONLY)
bellard3fb2ded2003-06-24 13:22:59 +0000418 /* if user mode only, we simulate a fake exception
ths9f083492006-12-07 18:28:42 +0000419 which will be handled outside the cpu execution
bellard3fb2ded2003-06-24 13:22:59 +0000420 loop */
bellard83479e72003-06-25 16:12:37 +0000421#if defined(TARGET_I386)
Andreas Färber97a8ea52013-02-02 10:57:51 +0100422 cc->do_interrupt(cpu);
bellard83479e72003-06-25 16:12:37 +0000423#endif
Andreas Färber27103422013-08-26 08:31:06 +0200424 ret = cpu->exception_index;
bellard3fb2ded2003-06-24 13:22:59 +0000425 break;
aurel3272d239e2009-01-14 19:40:27 +0000426#else
Andreas Färber97a8ea52013-02-02 10:57:51 +0100427 cc->do_interrupt(cpu);
Andreas Färber27103422013-08-26 08:31:06 +0200428 cpu->exception_index = -1;
aurel3272d239e2009-01-14 19:40:27 +0000429#endif
bellard3fb2ded2003-06-24 13:22:59 +0000430 }
ths5fafdf22007-09-16 21:08:06 +0000431 }
bellard9df217a2005-02-10 22:05:51 +0000432
blueswir1b5fc09a2008-05-04 06:38:18 +0000433 next_tb = 0; /* force lookup of first TB */
bellard3fb2ded2003-06-24 13:22:59 +0000434 for(;;) {
Andreas Färber259186a2013-01-17 18:51:17 +0100435 interrupt_request = cpu->interrupt_request;
malce1638bd2008-11-06 18:54:46 +0000436 if (unlikely(interrupt_request)) {
Andreas Färbered2803d2013-06-21 20:20:45 +0200437 if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
malce1638bd2008-11-06 18:54:46 +0000438 /* Mask out external interrupts for this step. */
Richard Henderson3125f762011-05-04 13:34:25 -0700439 interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
malce1638bd2008-11-06 18:54:46 +0000440 }
pbrook6658ffb2007-03-16 23:58:11 +0000441 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
Andreas Färber259186a2013-01-17 18:51:17 +0100442 cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
Andreas Färber27103422013-08-26 08:31:06 +0200443 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +0200444 cpu_loop_exit(cpu);
pbrook6658ffb2007-03-16 23:58:11 +0000445 }
balroga90b7312007-05-01 01:28:01 +0000446#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
Edgar E. Iglesiasb779e292009-05-20 21:31:33 +0200447 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
Bastian Koppelmann48e06fe2014-09-01 12:59:46 +0100448 defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || \
449 defined(TARGET_UNICORE32) || defined(TARGET_TRICORE)
balroga90b7312007-05-01 01:28:01 +0000450 if (interrupt_request & CPU_INTERRUPT_HALT) {
Andreas Färber259186a2013-01-17 18:51:17 +0100451 cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
452 cpu->halted = 1;
Andreas Färber27103422013-08-26 08:31:06 +0200453 cpu->exception_index = EXCP_HLT;
Andreas Färber5638d182013-08-27 17:52:12 +0200454 cpu_loop_exit(cpu);
balroga90b7312007-05-01 01:28:01 +0000455 }
456#endif
bellard68a79312003-06-30 13:12:32 +0000457#if defined(TARGET_I386)
Paolo Bonzini4a92a552013-03-05 15:35:17 +0100458 if (interrupt_request & CPU_INTERRUPT_INIT) {
459 cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
460 do_cpu_init(x86_cpu);
461 cpu->exception_index = EXCP_HALTED;
462 cpu_loop_exit(cpu);
463 }
464#else
465 if (interrupt_request & CPU_INTERRUPT_RESET) {
466 cpu_reset(cpu);
467 }
468#endif
469#if defined(TARGET_I386)
Jan Kiszka5d62c432012-07-09 16:42:32 +0200470#if !defined(CONFIG_USER_ONLY)
471 if (interrupt_request & CPU_INTERRUPT_POLL) {
Andreas Färber259186a2013-01-17 18:51:17 +0100472 cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
Andreas Färber693fa552013-12-24 03:18:12 +0100473 apic_poll_irq(x86_cpu->apic_state);
Jan Kiszka5d62c432012-07-09 16:42:32 +0200474 }
475#endif
Paolo Bonzini4a92a552013-03-05 15:35:17 +0100476 if (interrupt_request & CPU_INTERRUPT_SIPI) {
Andreas Färber693fa552013-12-24 03:18:12 +0100477 do_cpu_sipi(x86_cpu);
Gleb Natapovb09ea7d2009-06-17 23:26:59 +0300478 } else if (env->hflags2 & HF2_GIF_MASK) {
bellarddb620f42008-06-04 17:02:19 +0000479 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
480 !(env->hflags & HF_SMM_MASK)) {
Blue Swirl77b2bc22012-04-28 19:35:10 +0000481 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
482 0);
Andreas Färber259186a2013-01-17 18:51:17 +0100483 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
Andreas Färber693fa552013-12-24 03:18:12 +0100484 do_smm_enter(x86_cpu);
bellarddb620f42008-06-04 17:02:19 +0000485 next_tb = 0;
486 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
487 !(env->hflags2 & HF2_NMI_MASK)) {
Andreas Färber259186a2013-01-17 18:51:17 +0100488 cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
bellarddb620f42008-06-04 17:02:19 +0000489 env->hflags2 |= HF2_NMI_MASK;
Blue Swirle694d4e2011-05-16 19:38:48 +0000490 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
bellarddb620f42008-06-04 17:02:19 +0000491 next_tb = 0;
陳韋任e965fc32012-02-06 14:02:55 +0800492 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
Andreas Färber259186a2013-01-17 18:51:17 +0100493 cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
Blue Swirle694d4e2011-05-16 19:38:48 +0000494 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
Huang Ying79c4f6b2009-06-23 10:05:14 +0800495 next_tb = 0;
bellarddb620f42008-06-04 17:02:19 +0000496 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
497 (((env->hflags2 & HF2_VINTR_MASK) &&
498 (env->hflags2 & HF2_HIF_MASK)) ||
499 (!(env->hflags2 & HF2_VINTR_MASK) &&
500 (env->eflags & IF_MASK &&
501 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
502 int intno;
Blue Swirl77b2bc22012-04-28 19:35:10 +0000503 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
504 0);
Andreas Färber259186a2013-01-17 18:51:17 +0100505 cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
506 CPU_INTERRUPT_VIRQ);
bellarddb620f42008-06-04 17:02:19 +0000507 intno = cpu_get_pic_interrupt(env);
malc4f213872012-08-27 18:33:12 +0400508 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
509 do_interrupt_x86_hardirq(env, intno, 1);
510 /* ensure that no TB jump will be modified as
511 the program flow was changed */
512 next_tb = 0;
ths0573fbf2007-09-23 15:28:04 +0000513#if !defined(CONFIG_USER_ONLY)
bellarddb620f42008-06-04 17:02:19 +0000514 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
515 (env->eflags & IF_MASK) &&
516 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
517 int intno;
518 /* FIXME: this should respect TPR */
Blue Swirl77b2bc22012-04-28 19:35:10 +0000519 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
520 0);
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +0100521 intno = ldl_phys(cpu->as,
522 env->vm_vmcb
523 + offsetof(struct vmcb,
524 control.int_vector));
aliguori93fcfe32009-01-15 22:34:14 +0000525 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
Blue Swirle694d4e2011-05-16 19:38:48 +0000526 do_interrupt_x86_hardirq(env, intno, 1);
Andreas Färber259186a2013-01-17 18:51:17 +0100527 cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
bellarddb620f42008-06-04 17:02:19 +0000528 next_tb = 0;
ths0573fbf2007-09-23 15:28:04 +0000529#endif
bellarddb620f42008-06-04 17:02:19 +0000530 }
bellard68a79312003-06-30 13:12:32 +0000531 }
bellardce097762004-01-04 23:53:18 +0000532#elif defined(TARGET_PPC)
j_mayer47103572007-03-30 09:38:04 +0000533 if (interrupt_request & CPU_INTERRUPT_HARD) {
j_mayere9df0142007-04-09 22:45:36 +0000534 ppc_hw_interrupt(env);
Andreas Färber259186a2013-01-17 18:51:17 +0100535 if (env->pending_interrupts == 0) {
536 cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
537 }
blueswir1b5fc09a2008-05-04 06:38:18 +0000538 next_tb = 0;
bellardce097762004-01-04 23:53:18 +0000539 }
Michael Walle81ea0e12011-02-17 23:45:02 +0100540#elif defined(TARGET_LM32)
541 if ((interrupt_request & CPU_INTERRUPT_HARD)
542 && (env->ie & IE_IE)) {
Andreas Färber27103422013-08-26 08:31:06 +0200543 cpu->exception_index = EXCP_IRQ;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100544 cc->do_interrupt(cpu);
Michael Walle81ea0e12011-02-17 23:45:02 +0100545 next_tb = 0;
546 }
Edgar E. Iglesiasb779e292009-05-20 21:31:33 +0200547#elif defined(TARGET_MICROBLAZE)
548 if ((interrupt_request & CPU_INTERRUPT_HARD)
549 && (env->sregs[SR_MSR] & MSR_IE)
550 && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
551 && !(env->iflags & (D_FLAG | IMM_FLAG))) {
Andreas Färber27103422013-08-26 08:31:06 +0200552 cpu->exception_index = EXCP_IRQ;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100553 cc->do_interrupt(cpu);
Edgar E. Iglesiasb779e292009-05-20 21:31:33 +0200554 next_tb = 0;
555 }
bellard6af0bf92005-07-02 14:58:51 +0000556#elif defined(TARGET_MIPS)
557 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
Aurelien Jarno4cdc1cd2010-12-25 22:56:32 +0100558 cpu_mips_hw_interrupts_pending(env)) {
bellard6af0bf92005-07-02 14:58:51 +0000559 /* Raise it */
Andreas Färber27103422013-08-26 08:31:06 +0200560 cpu->exception_index = EXCP_EXT_INTERRUPT;
bellard6af0bf92005-07-02 14:58:51 +0000561 env->error_code = 0;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100562 cc->do_interrupt(cpu);
blueswir1b5fc09a2008-05-04 06:38:18 +0000563 next_tb = 0;
bellard6af0bf92005-07-02 14:58:51 +0000564 }
Bastian Koppelmann48e06fe2014-09-01 12:59:46 +0100565#elif defined(TARGET_TRICORE)
566 if ((interrupt_request & CPU_INTERRUPT_HARD)) {
567 cc->do_interrupt(cpu);
568 next_tb = 0;
569 }
570
Jia Liub6a71ef2012-07-20 15:50:41 +0800571#elif defined(TARGET_OPENRISC)
572 {
573 int idx = -1;
574 if ((interrupt_request & CPU_INTERRUPT_HARD)
575 && (env->sr & SR_IEE)) {
576 idx = EXCP_INT;
577 }
578 if ((interrupt_request & CPU_INTERRUPT_TIMER)
579 && (env->sr & SR_TEE)) {
580 idx = EXCP_TICK;
581 }
582 if (idx >= 0) {
Andreas Färber27103422013-08-26 08:31:06 +0200583 cpu->exception_index = idx;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100584 cc->do_interrupt(cpu);
Jia Liub6a71ef2012-07-20 15:50:41 +0800585 next_tb = 0;
586 }
587 }
bellarde95c8d52004-09-30 22:22:08 +0000588#elif defined(TARGET_SPARC)
Igor V. Kovalenkod532b262010-01-07 23:28:31 +0300589 if (interrupt_request & CPU_INTERRUPT_HARD) {
590 if (cpu_interrupts_enabled(env) &&
591 env->interrupt_index > 0) {
592 int pil = env->interrupt_index & 0xf;
593 int type = env->interrupt_index & 0xf0;
bellard66321a12005-04-06 20:47:48 +0000594
Igor V. Kovalenkod532b262010-01-07 23:28:31 +0300595 if (((type == TT_EXTINT) &&
596 cpu_pil_allowed(env, pil)) ||
597 type != TT_EXTINT) {
Andreas Färber27103422013-08-26 08:31:06 +0200598 cpu->exception_index = env->interrupt_index;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100599 cc->do_interrupt(cpu);
Igor V. Kovalenkod532b262010-01-07 23:28:31 +0300600 next_tb = 0;
601 }
602 }
陳韋任e965fc32012-02-06 14:02:55 +0800603 }
bellardb5ff1b32005-11-26 10:38:39 +0000604#elif defined(TARGET_ARM)
605 if (interrupt_request & CPU_INTERRUPT_FIQ
Peter Maydell4cc35612014-02-26 17:20:06 +0000606 && !(env->daif & PSTATE_F)) {
Andreas Färber27103422013-08-26 08:31:06 +0200607 cpu->exception_index = EXCP_FIQ;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100608 cc->do_interrupt(cpu);
blueswir1b5fc09a2008-05-04 06:38:18 +0000609 next_tb = 0;
bellardb5ff1b32005-11-26 10:38:39 +0000610 }
pbrook9ee6e8b2007-11-11 00:04:49 +0000611 /* ARMv7-M interrupt return works by loading a magic value
612 into the PC. On real hardware the load causes the
613 return to occur. The qemu implementation performs the
614 jump normally, then does the exception return when the
615 CPU tries to execute code at the magic address.
616 This will cause the magic PC value to be pushed to
Stefan Weila1c72732011-04-28 17:20:38 +0200617 the stack if an interrupt occurred at the wrong time.
pbrook9ee6e8b2007-11-11 00:04:49 +0000618 We avoid this by disabling interrupts when
619 pc contains a magic address. */
bellardb5ff1b32005-11-26 10:38:39 +0000620 if (interrupt_request & CPU_INTERRUPT_HARD
pbrook9ee6e8b2007-11-11 00:04:49 +0000621 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
Peter Maydell4cc35612014-02-26 17:20:06 +0000622 || !(env->daif & PSTATE_I))) {
Andreas Färber27103422013-08-26 08:31:06 +0200623 cpu->exception_index = EXCP_IRQ;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100624 cc->do_interrupt(cpu);
blueswir1b5fc09a2008-05-04 06:38:18 +0000625 next_tb = 0;
bellardb5ff1b32005-11-26 10:38:39 +0000626 }
Guan Xuetaod2fbca92011-04-12 16:27:03 +0800627#elif defined(TARGET_UNICORE32)
628 if (interrupt_request & CPU_INTERRUPT_HARD
629 && !(env->uncached_asr & ASR_I)) {
Andreas Färber27103422013-08-26 08:31:06 +0200630 cpu->exception_index = UC32_EXCP_INTR;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100631 cc->do_interrupt(cpu);
Guan Xuetaod2fbca92011-04-12 16:27:03 +0800632 next_tb = 0;
633 }
bellardfdf9b3e2006-04-27 21:07:38 +0000634#elif defined(TARGET_SH4)
thse96e2042007-12-02 06:18:24 +0000635 if (interrupt_request & CPU_INTERRUPT_HARD) {
Andreas Färber97a8ea52013-02-02 10:57:51 +0100636 cc->do_interrupt(cpu);
blueswir1b5fc09a2008-05-04 06:38:18 +0000637 next_tb = 0;
thse96e2042007-12-02 06:18:24 +0000638 }
j_mayereddf68a2007-04-05 07:22:49 +0000639#elif defined(TARGET_ALPHA)
Richard Henderson6a80e082011-04-18 15:09:09 -0700640 {
641 int idx = -1;
642 /* ??? This hard-codes the OSF/1 interrupt levels. */
陳韋任e965fc32012-02-06 14:02:55 +0800643 switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
Richard Henderson6a80e082011-04-18 15:09:09 -0700644 case 0 ... 3:
645 if (interrupt_request & CPU_INTERRUPT_HARD) {
646 idx = EXCP_DEV_INTERRUPT;
647 }
648 /* FALLTHRU */
649 case 4:
650 if (interrupt_request & CPU_INTERRUPT_TIMER) {
651 idx = EXCP_CLK_INTERRUPT;
652 }
653 /* FALLTHRU */
654 case 5:
655 if (interrupt_request & CPU_INTERRUPT_SMP) {
656 idx = EXCP_SMP_INTERRUPT;
657 }
658 /* FALLTHRU */
659 case 6:
660 if (interrupt_request & CPU_INTERRUPT_MCHK) {
661 idx = EXCP_MCHK;
662 }
663 }
664 if (idx >= 0) {
Andreas Färber27103422013-08-26 08:31:06 +0200665 cpu->exception_index = idx;
Richard Henderson6a80e082011-04-18 15:09:09 -0700666 env->error_code = 0;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100667 cc->do_interrupt(cpu);
Richard Henderson6a80e082011-04-18 15:09:09 -0700668 next_tb = 0;
669 }
j_mayereddf68a2007-04-05 07:22:49 +0000670 }
thsf1ccf902007-10-08 13:16:14 +0000671#elif defined(TARGET_CRIS)
edgar_igl1b1a38b2008-06-09 23:18:06 +0000672 if (interrupt_request & CPU_INTERRUPT_HARD
Edgar E. Iglesiasfb9fb692010-02-15 11:17:33 +0100673 && (env->pregs[PR_CCS] & I_FLAG)
674 && !env->locked_irq) {
Andreas Färber27103422013-08-26 08:31:06 +0200675 cpu->exception_index = EXCP_IRQ;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100676 cc->do_interrupt(cpu);
edgar_igl1b1a38b2008-06-09 23:18:06 +0000677 next_tb = 0;
678 }
Lars Persson82193142012-06-14 16:23:55 +0200679 if (interrupt_request & CPU_INTERRUPT_NMI) {
680 unsigned int m_flag_archval;
681 if (env->pregs[PR_VR] < 32) {
682 m_flag_archval = M_FLAG_V10;
683 } else {
684 m_flag_archval = M_FLAG_V32;
685 }
686 if ((env->pregs[PR_CCS] & m_flag_archval)) {
Andreas Färber27103422013-08-26 08:31:06 +0200687 cpu->exception_index = EXCP_NMI;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100688 cc->do_interrupt(cpu);
Lars Persson82193142012-06-14 16:23:55 +0200689 next_tb = 0;
690 }
thsf1ccf902007-10-08 13:16:14 +0000691 }
pbrook06338792007-05-23 19:58:11 +0000692#elif defined(TARGET_M68K)
693 if (interrupt_request & CPU_INTERRUPT_HARD
694 && ((env->sr & SR_I) >> SR_I_SHIFT)
695 < env->pending_level) {
696 /* Real hardware gets the interrupt vector via an
697 IACK cycle at this point. Current emulated
698 hardware doesn't rely on this, so we
699 provide/save the vector when the interrupt is
700 first signalled. */
Andreas Färber27103422013-08-26 08:31:06 +0200701 cpu->exception_index = env->pending_vector;
Blue Swirl3c688822011-05-21 07:55:24 +0000702 do_interrupt_m68k_hardirq(env);
blueswir1b5fc09a2008-05-04 06:38:18 +0000703 next_tb = 0;
pbrook06338792007-05-23 19:58:11 +0000704 }
Alexander Graf3110e292011-04-15 17:32:48 +0200705#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
706 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
707 (env->psw.mask & PSW_MASK_EXT)) {
Andreas Färber97a8ea52013-02-02 10:57:51 +0100708 cc->do_interrupt(cpu);
Alexander Graf3110e292011-04-15 17:32:48 +0200709 next_tb = 0;
710 }
Max Filippov40643d72011-09-06 03:55:41 +0400711#elif defined(TARGET_XTENSA)
712 if (interrupt_request & CPU_INTERRUPT_HARD) {
Andreas Färber27103422013-08-26 08:31:06 +0200713 cpu->exception_index = EXC_IRQ;
Andreas Färber97a8ea52013-02-02 10:57:51 +0100714 cc->do_interrupt(cpu);
Max Filippov40643d72011-09-06 03:55:41 +0400715 next_tb = 0;
716 }
bellard68a79312003-06-30 13:12:32 +0000717#endif
Stefan Weilff2712b2011-04-28 17:20:35 +0200718 /* Don't use the cached interrupt_request value,
bellard9d050952006-05-22 22:03:52 +0000719 do_interrupt may have updated the EXITTB flag. */
Andreas Färber259186a2013-01-17 18:51:17 +0100720 if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
721 cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
bellardbf3e8bf2004-02-16 21:58:54 +0000722 /* ensure that no TB jump will be modified as
723 the program flow was changed */
blueswir1b5fc09a2008-05-04 06:38:18 +0000724 next_tb = 0;
bellardbf3e8bf2004-02-16 21:58:54 +0000725 }
aurel32be214e62009-03-06 21:48:00 +0000726 }
Andreas Färberfcd7d002012-12-17 08:02:44 +0100727 if (unlikely(cpu->exit_request)) {
728 cpu->exit_request = 0;
Andreas Färber27103422013-08-26 08:31:06 +0200729 cpu->exception_index = EXCP_INTERRUPT;
Andreas Färber5638d182013-08-27 17:52:12 +0200730 cpu_loop_exit(cpu);
bellard3fb2ded2003-06-24 13:22:59 +0000731 }
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +0700732 spin_lock(&tcg_ctx.tb_ctx.tb_lock);
Peter Maydellbae2c272014-04-04 17:42:56 +0100733 have_tb_lock = true;
Blue Swirlcea5f9a2011-05-15 16:03:25 +0000734 tb = tb_find_fast(env);
pbrookd5975362008-06-07 20:50:51 +0000735 /* Note: we do it here to avoid a gcc bug on Mac OS X when
736 doing it in tb_find_slow */
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +0700737 if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
pbrookd5975362008-06-07 20:50:51 +0000738 /* as some TB could have been invalidated because
739 of memory exceptions while generating the code, we
740 must recompute the hash index here */
741 next_tb = 0;
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +0700742 tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
pbrookd5975362008-06-07 20:50:51 +0000743 }
Peter Maydellc30d1ae2013-04-11 21:21:46 +0100744 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
745 qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
746 tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
747 }
bellard8a40a182005-11-20 10:35:40 +0000748 /* see if we can patch the calling TB. When the TB
749 spans two pages, we cannot safely do a direct
750 jump. */
Paolo Bonzini040f2fb2010-01-15 08:56:36 +0100751 if (next_tb != 0 && tb->page_addr[1] == -1) {
Peter Maydell09800112013-02-22 18:10:00 +0000752 tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
753 next_tb & TB_EXIT_MASK, tb);
bellard3fb2ded2003-06-24 13:22:59 +0000754 }
Peter Maydellbae2c272014-04-04 17:42:56 +0100755 have_tb_lock = false;
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +0700756 spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
malc55e8b852008-11-04 14:18:13 +0000757
758 /* cpu_interrupt might be called while translating the
759 TB, but before it is linked into a potentially
760 infinite loop and becomes env->current_tb. Avoid
761 starting execution if there is a pending interrupt. */
Andreas Färberd77953b2013-01-16 19:29:31 +0100762 cpu->current_tb = tb;
Jan Kiszkab0052d12010-06-25 16:56:50 +0200763 barrier();
Andreas Färberfcd7d002012-12-17 08:02:44 +0100764 if (likely(!cpu->exit_request)) {
Alex Bennée6db8b532014-08-01 17:08:57 +0100765 trace_exec_tb(tb, tb->pc);
pbrook2e70f6e2008-06-29 01:03:05 +0000766 tc_ptr = tb->tc_ptr;
陳韋任e965fc32012-02-06 14:02:55 +0800767 /* execute the generated code */
Peter Maydell77211372013-02-22 18:10:02 +0000768 next_tb = cpu_tb_exec(cpu, tc_ptr);
Peter Maydell378df4b2013-02-22 18:10:03 +0000769 switch (next_tb & TB_EXIT_MASK) {
770 case TB_EXIT_REQUESTED:
771 /* Something asked us to stop executing
772 * chained TBs; just continue round the main
773 * loop. Whatever requested the exit will also
774 * have set something else (eg exit_request or
775 * interrupt_request) which we will handle
776 * next time around the loop.
777 */
778 tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
779 next_tb = 0;
780 break;
781 case TB_EXIT_ICOUNT_EXPIRED:
782 {
thsbf20dc02008-06-30 17:22:19 +0000783 /* Instruction counter expired. */
pbrook2e70f6e2008-06-29 01:03:05 +0000784 int insns_left;
Peter Maydell09800112013-02-22 18:10:00 +0000785 tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
Andreas Färber28ecfd72013-08-26 05:51:49 +0200786 insns_left = cpu->icount_decr.u32;
Andreas Färberefee7342013-08-26 05:39:29 +0200787 if (cpu->icount_extra && insns_left >= 0) {
pbrook2e70f6e2008-06-29 01:03:05 +0000788 /* Refill decrementer and continue execution. */
Andreas Färberefee7342013-08-26 05:39:29 +0200789 cpu->icount_extra += insns_left;
790 if (cpu->icount_extra > 0xffff) {
pbrook2e70f6e2008-06-29 01:03:05 +0000791 insns_left = 0xffff;
792 } else {
Andreas Färberefee7342013-08-26 05:39:29 +0200793 insns_left = cpu->icount_extra;
pbrook2e70f6e2008-06-29 01:03:05 +0000794 }
Andreas Färberefee7342013-08-26 05:39:29 +0200795 cpu->icount_extra -= insns_left;
Andreas Färber28ecfd72013-08-26 05:51:49 +0200796 cpu->icount_decr.u16.low = insns_left;
pbrook2e70f6e2008-06-29 01:03:05 +0000797 } else {
798 if (insns_left > 0) {
799 /* Execute remaining instructions. */
Blue Swirlcea5f9a2011-05-15 16:03:25 +0000800 cpu_exec_nocache(env, insns_left, tb);
Sebastian Tanasec2aa5f82014-07-25 11:56:31 +0200801 align_clocks(&sc, cpu);
pbrook2e70f6e2008-06-29 01:03:05 +0000802 }
Andreas Färber27103422013-08-26 08:31:06 +0200803 cpu->exception_index = EXCP_INTERRUPT;
pbrook2e70f6e2008-06-29 01:03:05 +0000804 next_tb = 0;
Andreas Färber5638d182013-08-27 17:52:12 +0200805 cpu_loop_exit(cpu);
pbrook2e70f6e2008-06-29 01:03:05 +0000806 }
Peter Maydell378df4b2013-02-22 18:10:03 +0000807 break;
808 }
809 default:
810 break;
pbrook2e70f6e2008-06-29 01:03:05 +0000811 }
812 }
Andreas Färberd77953b2013-01-16 19:29:31 +0100813 cpu->current_tb = NULL;
Sebastian Tanasec2aa5f82014-07-25 11:56:31 +0200814 /* Try to align the host and virtual clocks
815 if the guest is in advance */
816 align_clocks(&sc, cpu);
bellard4cbf74b2003-08-10 21:48:43 +0000817 /* reset soft MMU for next block (it can currently
818 only be set by a memory fault) */
ths50a518e2007-06-03 18:52:15 +0000819 } /* for(;;) */
Jan Kiszka0d101932011-07-02 09:50:51 +0200820 } else {
821 /* Reload env after longjmp - the compiler may have smashed all
822 * local variables as longjmp is marked 'noreturn'. */
Andreas Färber4917cf42013-05-27 05:17:50 +0200823 cpu = current_cpu;
824 env = cpu->env_ptr;
Juergen Lock6c78f292013-10-03 16:09:37 +0200825#if !(defined(CONFIG_USER_ONLY) && \
826 (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
827 cc = CPU_GET_CLASS(cpu);
828#endif
Andreas Färber693fa552013-12-24 03:18:12 +0100829#ifdef TARGET_I386
830 x86_cpu = X86_CPU(cpu);
831#endif
Peter Maydellbae2c272014-04-04 17:42:56 +0100832 if (have_tb_lock) {
833 spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
834 have_tb_lock = false;
835 }
bellard7d132992003-03-06 23:23:54 +0000836 }
bellard3fb2ded2003-06-24 13:22:59 +0000837 } /* for(;;) */
838
bellard7d132992003-03-06 23:23:54 +0000839
bellarde4533c72003-06-15 19:51:39 +0000840#if defined(TARGET_I386)
bellard9de5e442003-03-23 16:49:39 +0000841 /* restore flags in standard format */
Blue Swirle694d4e2011-05-16 19:38:48 +0000842 env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
liguang80cf2c82013-05-28 16:21:08 +0800843 | (env->df & DF_MASK);
bellarde4533c72003-06-15 19:51:39 +0000844#elif defined(TARGET_ARM)
bellardb7bcbe92005-02-22 19:27:29 +0000845 /* XXX: Save/restore host fpu exception state?. */
Guan Xuetaod2fbca92011-04-12 16:27:03 +0800846#elif defined(TARGET_UNICORE32)
bellard93ac68b2003-09-30 20:57:29 +0000847#elif defined(TARGET_SPARC)
bellard67867302003-11-23 17:05:30 +0000848#elif defined(TARGET_PPC)
Michael Walle81ea0e12011-02-17 23:45:02 +0100849#elif defined(TARGET_LM32)
pbrooke6e59062006-10-22 00:18:54 +0000850#elif defined(TARGET_M68K)
851 cpu_m68k_flush_flags(env, env->cc_op);
852 env->cc_op = CC_OP_FLAGS;
853 env->sr = (env->sr & 0xffe0)
854 | env->cc_dest | (env->cc_x << 4);
Edgar E. Iglesiasb779e292009-05-20 21:31:33 +0200855#elif defined(TARGET_MICROBLAZE)
bellard6af0bf92005-07-02 14:58:51 +0000856#elif defined(TARGET_MIPS)
Bastian Koppelmann48e06fe2014-09-01 12:59:46 +0100857#elif defined(TARGET_TRICORE)
Anthony Greend15a9c22013-03-18 15:49:25 -0400858#elif defined(TARGET_MOXIE)
Jia Liue67db062012-07-20 15:50:39 +0800859#elif defined(TARGET_OPENRISC)
bellardfdf9b3e2006-04-27 21:07:38 +0000860#elif defined(TARGET_SH4)
j_mayereddf68a2007-04-05 07:22:49 +0000861#elif defined(TARGET_ALPHA)
thsf1ccf902007-10-08 13:16:14 +0000862#elif defined(TARGET_CRIS)
Alexander Graf10ec5112009-12-05 12:44:21 +0100863#elif defined(TARGET_S390X)
Max Filippov23288262011-09-06 03:55:25 +0400864#elif defined(TARGET_XTENSA)
bellardfdf9b3e2006-04-27 21:07:38 +0000865 /* XXXXX */
bellarde4533c72003-06-15 19:51:39 +0000866#else
867#error unsupported target CPU
868#endif
pbrook1057eaa2007-02-04 13:37:44 +0000869
Andreas Färber4917cf42013-05-27 05:17:50 +0200870 /* fail safe : never use current_cpu outside cpu_exec() */
871 current_cpu = NULL;
bellard7d132992003-03-06 23:23:54 +0000872 return ret;
873}