/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "config.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"

#include "exec/cputlb.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"

//#define DEBUG_TLB
//#define DEBUG_TLB_CHECK

/* statistics */
int tlb_flush_count;

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case.  This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *cpu, int flush_global)
{
    CPUArchState *env = cpu->env_ptr;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

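/* Flush TLB entries only for the MMU indexes passed in the variadic
 * argument list, which must be terminated by a negative value.  A usage
 * sketch (MMU_IDX_A and MMU_IDX_B are illustrative names, not real
 * indexes):
 *
 *     tlb_flush_by_mmuidx(cpu, MMU_IDX_A, MMU_IDX_B, -1);
 */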
static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

#if defined(DEBUG_TLB)
    printf("tlb_flush_by_mmuidx:");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

#if defined(DEBUG_TLB)
        printf(" %d", mmu_idx);
#endif

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

#if defined(DEBUG_TLB)
    printf("\n");
#endif

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;
    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}

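/* Invalidate tlb_entry if any of its read/write/code addresses matches the
 * page-aligned addr.  Keeping TLB_INVALID_MASK in the comparison mask means
 * an entry that is already invalid (all bits set) can never match.  */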
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

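/* Flush, in every MMU mode, any TLB entry mapping the page that contains
 * addr; falls back to a full flush when addr lies inside the recorded
 * large-page region.  */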
void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(cpu, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

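/* As tlb_flush_page(), but flushing only the MMU indexes listed in the
 * variadic arguments (terminated by a negative value).  */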
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

#if defined(DEBUG_TLB)
    printf("tlb_flush_page_by_mmu_idx: " TARGET_FMT_lx, addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf(" forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

#if defined(DEBUG_TLB)
        printf(" %d", mmu_idx);
#endif

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

#if defined(DEBUG_TLB)
    printf("\n");
#endif

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'ram_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

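/* Mark tlb_entry not-dirty (forcing writes onto the slow path) if its
 * backing host address falls in [start, start + length).  The unsigned
 * subtraction turns the range check into a single comparison.  */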
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr) == NULL) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

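/* Walk the whole TLB of cpu (main and victim tables, every MMU mode) and
 * mark not-dirty each writable entry backed by RAM in the given range.  */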
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

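/* Worked example for tlb_add_large_page() below (illustrative values,
 * assuming a 32-bit target_ulong): with an existing recorded region of
 * addr/mask 0x10000000/0xffff0000 and a new 64KB page at vaddr 0x10230000,
 * the loop widens the mask one bit at a time until
 * (0x10000000 ^ 0x10230000) & mask == 0, giving mask 0xffc00000 and a
 * combined region of 0x10000000/0xffc00000.  */
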
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry.  At most one entry for a given virtual address
 * is permitted.  Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

#if defined(DEBUG_TLB)
    qemu_log_mask(CPU_LOG_MMU,
           "tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d\n",
           vaddr, paddr, prot, mmu_idx);
#endif

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

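    /* The victim TLB is a small fully-associative cache (CPU_VTLB_SIZE
     * entries per MMU mode) filled round-robin via vtlb_index; preserving
     * the displaced translation there softens the cost of conflict misses
     * in the direct-mapped main TLB.  */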
    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(section->mr->ram_addr
                                                   + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

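/* A minimal sketch of how a target's tlb_fill() typically refills the TLB
 * on a miss (walk_page_table() and raise_guest_fault() are hypothetical
 * helpers, not real QEMU APIs):
 *
 *     void tlb_fill(CPUState *cs, target_ulong addr, int is_write,
 *                   int mmu_idx, uintptr_t retaddr)
 *     {
 *         hwaddr paddr;
 *         int prot;
 *
 *         if (walk_page_table(cs, addr, is_write, mmu_idx, &paddr, &prot)) {
 *             cpu_restore_state(cs, retaddr);
 *             raise_guest_fault(cs, addr);    // longjmps back to the loop
 *         }
 *         tlb_set_page(cs, addr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
 *                      prot, mmu_idx, TARGET_PAGE_SIZE);
 *     }
 */
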
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    pd = env1->iotlb[mmu_idx][page_index].addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            cpu_abort(cpu, "Trying to execute code outside RAM or ROM at 0x"
                      TARGET_FMT_lx "\n", addr);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

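/* Instantiate the softmmu load/store helpers: softmmu_template.h is
 * included once per access size, with SHIFT 0/1/2/3 producing the 1-, 2-,
 * 4- and 8-byte variants and MMUSUFFIX selecting the data (_mmu) or
 * code-fetch (_cmmu) flavour of the generated helper names.  */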
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
#undef MMUSUFFIX

#define MMUSUFFIX _cmmu
#undef GETPC_ADJ
#define GETPC_ADJ 0
#undef GETRA
#define GETRA() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"