/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "config.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"

#include "exec/cputlb.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"

//#define DEBUG_TLB
//#define DEBUG_TLB_CHECK

/* statistics */
int tlb_flush_count;

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case.  This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *cpu, int flush_global)
{
    CPUArchState *env = cpu->env_ptr;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

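    /* Filling the tables with 0xff sets every address field to an
     * all-ones pattern (TLB_INVALID_MASK included) that can never
     * match a page-aligned lookup address, so every entry becomes a
     * guaranteed miss. */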
    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

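/* Flush TLB entries for the given MMU indexes only.  The variable
 * argument list is a sequence of int MMU indexes terminated by a
 * negative value, e.g. tlb_flush_by_mmuidx(cpu, 0, 2, -1). */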
static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

#if defined(DEBUG_TLB)
    printf("tlb_flush_by_mmuidx:");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

#if defined(DEBUG_TLB)
        printf(" %d", mmu_idx);
#endif

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

#if defined(DEBUG_TLB)
    printf("\n");
#endif

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;
    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}

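/* Invalidate a single TLB entry if it maps @addr for any access kind;
 * a match on the read, write or code address wipes the whole entry. */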
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(cpu, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
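    /* The TLB is direct-mapped: a page's slot is its virtual page
     * number modulo CPU_TLB_SIZE, so only one slot per mmu_idx can
     * hold this page. */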
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

#if defined(DEBUG_TLB)
    printf("tlb_flush_page_by_mmuidx: " TARGET_FMT_lx, addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf(" forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

#if defined(DEBUG_TLB)
        printf(" %d", mmu_idx);
#endif

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

#if defined(DEBUG_TLB)
    printf("\n");
#endif

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'ram_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

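/* Mark an entry whose backing host memory lies in [start, start + length)
 * with TLB_NOTDIRTY, so that the next write to it takes the slow path and
 * can update the dirty bitmap.  The unsigned comparison below folds the
 * two bounds checks of the range test into one. */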
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr) == NULL) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length)
{
    CPUState *cpu;
    CPUArchState *env;

    CPU_FOREACH(cpu) {
        int mmu_idx;

        env = cpu->env_ptr;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            unsigned int i;

            for (i = 0; i < CPU_TLB_SIZE; i++) {
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
            }

            for (i = 0; i < CPU_VTLB_SIZE; i++) {
                tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                      start1, length);
            }
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
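    /* Widen the mask one bit at a time until the old flush address and
     * the new page lie inside the same naturally aligned power-of-two
     * region. */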
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

#if defined(DEBUG_TLB)
    qemu_log_mask(CPU_LOG_MMU,
           "tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d\n",
           vaddr, paddr, prot, mmu_idx);
#endif

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
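    /* Both the iotlb value and the host addend are stored as offsets
     * from vaddr, so the access path can recover them by adding the
     * guest virtual address back at lookup time. */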
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(section->mr->ram_addr
                                                   + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
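        /* TLB miss: do a dummy code load so that tlb_fill refills the
         * entry before we inspect it below. */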
        cpu_ldub_code(env1, addr);
    }
    pd = env1->iotlb[mmu_idx][page_index].addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            cpu_abort(cpu, "Trying to execute code outside RAM or ROM at 0x"
                      TARGET_FMT_lx "\n", addr);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

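/* Instantiate the softmmu load/store helpers.  softmmu_template.h
 * expands once per access size: SHIFT 0/1/2/3 generate the 1-, 2-,
 * 4- and 8-byte variants, with MMUSUFFIX appended to the generated
 * helper names. */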
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
#undef MMUSUFFIX

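/* Code-access variants of the same helpers.  These are called from the
 * translator rather than from TCG-generated code, so there is no host
 * return address to unwind; GETRA/GETPC_ADJ are stubbed out to zero. */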
#define MMUSUFFIX _cmmu
#undef GETPC_ADJ
#define GETPC_ADJ 0
#undef GETRA
#define GETRA() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"