/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

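/* Worked example (illustrative only, not built): with a 32-bit guest,
   12-bit pages and L2_BITS == 10, V_L1_BITS_REM == (32 - 12) % 10 == 0,
   which is below 4, so V_L1_BITS == 10 and the virtual map is two
   10-bit levels deep. The hypothetical helper below shows the index
   computation that page_find_alloc() performs further down. */
#if 0
static void example_page_indexes(target_ulong index)
{
    /* top-level slot: the uppermost V_L1_BITS of the page index */
    size_t l1 = (index >> V_L1_SHIFT) & (V_L1_SIZE - 1);
    /* bottom-level slot: the lowest L2_BITS of the page index */
    size_t l2 = index & (L2_SIZE - 1);
    printf("L1 slot %zu, L2 slot %zu\n", l1, l2);
}
#endif
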
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
CPUWriteMemoryFunc *_io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *_io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
/* Mark a host memory range as executable. */
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
/* Mark a host memory range as executable, rounding the range out to
   host page boundaries for mprotect(). */
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

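/* Example (illustrative only): a typical maps line such as
   "00400000-0060a000 r-xp ..." parses into startaddr == 0x00400000 and
   endaddr == 0x0060a000 with the fscanf pattern above. The hypothetical
   sketch below runs the same conversion against an in-memory string. */
#if 0
static void example_parse_maps_line(void)
{
    unsigned long startaddr, endaddr;

    /* sscanf uses the same %lx-%lx conversion as the fscanf call above */
    if (sscanf("00400000-0060a000 r-xp 00000000 08:01 1234 /bin/cat",
               "%lx-%lx", &startaddr, &endaddr) == 2) {
        printf("reserved host range: %lx-%lx\n", startaddr, endaddr);
    }
}
#endif
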
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

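/* Usage sketch (illustrative only): look up a guest page's descriptor
   without allocating, e.g. to check whether any TB resides on it; the
   helper name is hypothetical. */
#if 0
static int example_page_has_tb(tb_page_addr_t addr)
{
    PageDesc *p = page_find(addr >> TARGET_PAGE_BITS);
    return p != NULL && p->first_tb != NULL;
}
#endif
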
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;
        int first_index = index & ~(L2_SIZE - 1);

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = io_mem_unassigned.ram_addr;
            pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
{
    PhysPageDesc *p = phys_page_find_alloc(index, 0);

    if (p) {
        return *p;
    } else {
        return (PhysPageDesc) {
            .phys_offset = io_mem_unassigned.ram_addr,
            .region_offset = index << TARGET_PAGE_BITS,
        };
    }
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

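/* Usage sketch (illustrative only): a hypothetical caller bringing the
   translator up before any CPU runs; 0 requests the default buffer
   size. The real call sites and ordering are dictated by the machine
   start-up code, not by this sketch. */
#if 0
static void example_startup(void)
{
    cpu_exec_init_all();   /* memory map and I/O dispatch tables */
    tcg_exec_init(0);      /* code buffer, prologue, page tables */
}
#endif
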
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

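/* Usage sketch (illustrative only): walk the global CPU list the same
   way qemu_get_cpu() does, here to count the registered CPUs; the
   helper name is hypothetical. */
#if 0
static int example_count_cpus(void)
{
    CPUState *env;
    int n = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        n++;
    }
    return n;
}
#endif
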
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated. */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

747/* flush all the translation blocks */
bellardd4e81642003-05-25 16:46:15 +0000748/* XXX: tb_flush is currently not thread safe */
bellard6a00d602005-11-21 23:25:50 +0000749void tb_flush(CPUState *env1)
bellardfd6ce8f2003-05-14 19:00:11 +0000750{
bellard6a00d602005-11-21 23:25:50 +0000751 CPUState *env;
bellard01243112004-01-04 15:48:17 +0000752#if defined(DEBUG_FLUSH)
blueswir1ab3d1722007-11-04 07:31:40 +0000753 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
754 (unsigned long)(code_gen_ptr - code_gen_buffer),
755 nb_tbs, nb_tbs > 0 ?
756 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
bellardfd6ce8f2003-05-14 19:00:11 +0000757#endif
bellard26a5f132008-05-28 12:30:31 +0000758 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
pbrooka208e542008-03-31 17:07:36 +0000759 cpu_abort(env1, "Internal error: code buffer overflow\n");
760
bellardfd6ce8f2003-05-14 19:00:11 +0000761 nb_tbs = 0;
ths3b46e622007-09-17 08:09:54 +0000762
bellard6a00d602005-11-21 23:25:50 +0000763 for(env = first_cpu; env != NULL; env = env->next_cpu) {
764 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
765 }
bellard9fa3e852004-01-04 18:06:42 +0000766
bellard8a8a6082004-10-03 13:36:49 +0000767 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
bellardfd6ce8f2003-05-14 19:00:11 +0000768 page_flush_tb();
bellard9fa3e852004-01-04 18:06:42 +0000769
bellardfd6ce8f2003-05-14 19:00:11 +0000770 code_gen_ptr = code_gen_buffer;
bellardd4e81642003-05-25 16:46:15 +0000771 /* XXX: flush processor icache at this point if cache flush is
772 expensive */
bellarde3db7222005-01-26 22:00:47 +0000773 tb_flush_count++;
bellardfd6ce8f2003-05-14 19:00:11 +0000774}
775
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

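/* Illustrative note on the tagged-pointer convention used above: the
   low two bits of a TranslationBlock pointer stored in the page_next
   and jump lists encode which slot (0 or 1) the link belongs to, while
   the value 2 marks the list head. The hypothetical helpers below just
   pack and unpack that tag; they are not part of the original file. */
#if 0
static inline TranslationBlock *example_tb_tag(TranslationBlock *tb, int n)
{
    /* pointers are at least 4-byte aligned, so the low bits are free */
    return (TranslationBlock *)((long)tb | n);
}

static inline TranslationBlock *example_tb_untag(TranslationBlock *tb, int *n)
{
    *n = (long)tb & 3;          /* slot index, or 2 for the list head */
    return (TranslationBlock *)((long)tb & ~3);
}
#endif
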
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

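/* Worked example (illustrative only): set_bits(tab, 3, 7) marks bits
   3..9, which straddle a byte boundary, so both branches of the span
   logic are exercised. Assumes <assert.h> is available via the common
   headers; the function name is hypothetical. */
#if 0
static void example_set_bits(void)
{
    uint8_t tab[2] = { 0, 0 };

    set_bits(tab, 3, 7);       /* bits 3..9 */
    assert(tab[0] == 0xf8);    /* bits 3-7 of the first byte */
    assert(tab[1] == 0x03);    /* bits 8-9 spill into the second byte */
}
#endif
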
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end). NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

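/* Worked example (illustrative only): for a write of len 4 at page
   offset 0x123, the bitmap test above reads byte 0x24 (0x123 >> 3),
   shifts it right by 3 (0x123 & 7), and masks with (1 << 4) - 1, so the
   slow invalidate path is taken only if one of the page offsets
   0x123..0x126 actually holds translated code. */
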
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

1229/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001230static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001231 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001232{
1233 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001234#ifndef CONFIG_USER_ONLY
1235 bool page_already_protected;
1236#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001237
bellard9fa3e852004-01-04 18:06:42 +00001238 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001239 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001240 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001241#ifndef CONFIG_USER_ONLY
1242 page_already_protected = p->first_tb != NULL;
1243#endif
bellard9fa3e852004-01-04 18:06:42 +00001244 p->first_tb = (TranslationBlock *)((long)tb | n);
1245 invalidate_page_bitmap(p);
1246
bellard107db442004-06-22 18:48:46 +00001247#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001248
bellard9fa3e852004-01-04 18:06:42 +00001249#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001250 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001251 target_ulong addr;
1252 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001253 int prot;
1254
bellardfd6ce8f2003-05-14 19:00:11 +00001255 /* force the host page as non writable (writes will have a
1256 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001257 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001258 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001259 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1260 addr += TARGET_PAGE_SIZE) {
1261
1262 p2 = page_find (addr >> TARGET_PAGE_BITS);
1263 if (!p2)
1264 continue;
1265 prot |= p2->flags;
1266 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001267 }
ths5fafdf22007-09-16 21:08:06 +00001268 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001269 (prot & PAGE_BITS) & ~PAGE_WRITE);
1270#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001271 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001272 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001273#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001274 }
bellard9fa3e852004-01-04 18:06:42 +00001275#else
1276 /* if some code is already present, then the pages are already
1277 protected. So we handle the case where only the first TB is
1278 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001279 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001280 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001281 }
1282#endif
bellardd720b932004-04-25 17:57:43 +00001283
1284#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001285}
1286
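/* Illustrative note (not from the original source): p->first_tb stores the
   TB pointer with the page slot index n OR-ed into its low bits, as seen in
   tb_alloc_page() above. A hypothetical helper to walk the per-page list
   would untag like this:

   static TranslationBlock *page_tb_next(TranslationBlock *tagged)
   {
       int n = (long)tagged & 3;                         // slot index (0 or 1)
       TranslationBlock *tb = (TranslationBlock *)((long)tagged & ~3);
       return tb ? tb->page_next[n] : NULL;              // follow that slot's link
   }
*/
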
bellard9fa3e852004-01-04 18:06:42 +00001287/* add a new TB and link it to the physical page tables. phys_page2 is
1288 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001289void tb_link_page(TranslationBlock *tb,
1290 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001291{
bellard9fa3e852004-01-04 18:06:42 +00001292 unsigned int h;
1293 TranslationBlock **ptb;
1294
pbrookc8a706f2008-06-02 16:16:42 +00001295 /* Grab the mmap lock to stop another thread invalidating this TB
1296 before we are done. */
1297 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001298 /* add in the physical hash table */
1299 h = tb_phys_hash_func(phys_pc);
1300 ptb = &tb_phys_hash[h];
1301 tb->phys_hash_next = *ptb;
1302 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001303
1304 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001305 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1306 if (phys_page2 != -1)
1307 tb_alloc_page(tb, 1, phys_page2);
1308 else
1309 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001310
bellardd4e81642003-05-25 16:46:15 +00001311 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1312 tb->jmp_next[0] = NULL;
1313 tb->jmp_next[1] = NULL;
1314
1315 /* init original jump addresses */
1316 if (tb->tb_next_offset[0] != 0xffff)
1317 tb_reset_jump(tb, 0);
1318 if (tb->tb_next_offset[1] != 0xffff)
1319 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001320
1321#ifdef DEBUG_TB_CHECK
1322 tb_page_check();
1323#endif
pbrookc8a706f2008-06-02 16:16:42 +00001324 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001325}
1326
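/* Usage sketch (hypothetical caller, mirroring how the translator links a
   freshly generated block; get_page_addr_code() is assumed from the
   surrounding tree):

   tb_page_addr_t phys_pc = get_page_addr_code(env, tb->pc);
   tb_page_addr_t phys_page2 = -1;
   if ((tb->pc & TARGET_PAGE_MASK) !=
       ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK)) {
       // the block spills onto a second guest page
       phys_page2 = get_page_addr_code(env,
                                       (tb->pc + tb->size - 1) & TARGET_PAGE_MASK);
   }
   tb_link_page(tb, phys_pc, phys_page2);
*/
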
bellarda513fe12003-05-27 23:29:48 +00001327/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1328 tb[1].tc_ptr. Return NULL if not found */
1329TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1330{
1331 int m_min, m_max, m;
1332 unsigned long v;
1333 TranslationBlock *tb;
1334
1335 if (nb_tbs <= 0)
1336 return NULL;
1337 if (tc_ptr < (unsigned long)code_gen_buffer ||
1338 tc_ptr >= (unsigned long)code_gen_ptr)
1339 return NULL;
1340 /* binary search (cf Knuth) */
1341 m_min = 0;
1342 m_max = nb_tbs - 1;
1343 while (m_min <= m_max) {
1344 m = (m_min + m_max) >> 1;
1345 tb = &tbs[m];
1346 v = (unsigned long)tb->tc_ptr;
1347 if (v == tc_ptr)
1348 return tb;
1349 else if (tc_ptr < v) {
1350 m_max = m - 1;
1351 } else {
1352 m_min = m + 1;
1353 }
ths5fafdf22007-09-16 21:08:06 +00001354 }
bellarda513fe12003-05-27 23:29:48 +00001355 return &tbs[m_max];
1356}
bellard75012672003-06-21 13:11:07 +00001357
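/* Illustrative use (assumption: a SEGV handler has the faulting host PC
   inside generated code and wants the guest-level context back):

   TranslationBlock *tb = tb_find_pc((unsigned long)host_pc);
   if (tb) {
       // tb->pc / tb->cs_base / tb->flags describe the start of the guest block
   }
*/
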
bellardea041c02003-06-25 16:16:50 +00001358static void tb_reset_jump_recursive(TranslationBlock *tb);
1359
1360static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1361{
1362 TranslationBlock *tb1, *tb_next, **ptb;
1363 unsigned int n1;
1364
1365 tb1 = tb->jmp_next[n];
1366 if (tb1 != NULL) {
1367 /* find head of list */
1368 for(;;) {
1369 n1 = (long)tb1 & 3;
1370 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1371 if (n1 == 2)
1372 break;
1373 tb1 = tb1->jmp_next[n1];
1374 }
1375        /* we are now sure that tb jumps to tb1 */
1376 tb_next = tb1;
1377
1378 /* remove tb from the jmp_first list */
1379 ptb = &tb_next->jmp_first;
1380 for(;;) {
1381 tb1 = *ptb;
1382 n1 = (long)tb1 & 3;
1383 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1384 if (n1 == n && tb1 == tb)
1385 break;
1386 ptb = &tb1->jmp_next[n1];
1387 }
1388 *ptb = tb->jmp_next[n];
1389 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001390
bellardea041c02003-06-25 16:16:50 +00001391 /* suppress the jump to next tb in generated code */
1392 tb_reset_jump(tb, n);
1393
bellard01243112004-01-04 15:48:17 +00001394        /* suppress jumps in the tb to which we could have jumped */
bellardea041c02003-06-25 16:16:50 +00001395 tb_reset_jump_recursive(tb_next);
1396 }
1397}
1398
1399static void tb_reset_jump_recursive(TranslationBlock *tb)
1400{
1401 tb_reset_jump_recursive2(tb, 0);
1402 tb_reset_jump_recursive2(tb, 1);
1403}
1404
bellard1fddef42005-04-17 19:16:13 +00001405#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001406#if defined(CONFIG_USER_ONLY)
1407static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1408{
1409 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1410}
1411#else
bellardd720b932004-04-25 17:57:43 +00001412static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1413{
Anthony Liguoric227f092009-10-01 16:12:16 -05001414 target_phys_addr_t addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00001415 target_ulong pd;
Anthony Liguoric227f092009-10-01 16:12:16 -05001416 ram_addr_t ram_addr;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001417 PhysPageDesc p;
bellardd720b932004-04-25 17:57:43 +00001418
pbrookc2f07f82006-04-08 17:14:56 +00001419 addr = cpu_get_phys_page_debug(env, pc);
1420 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001421 pd = p.phys_offset;
pbrookc2f07f82006-04-08 17:14:56 +00001422 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001423 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001424}
bellardc27004e2005-01-03 23:35:10 +00001425#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001426#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001427
Paul Brookc527ee82010-03-01 03:31:14 +00001428#if defined(CONFIG_USER_ONLY)
1429void cpu_watchpoint_remove_all(CPUState *env, int mask)
1430{
1432}
1433
1434int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1435 int flags, CPUWatchpoint **watchpoint)
1436{
1437 return -ENOSYS;
1438}
1439#else
pbrook6658ffb2007-03-16 23:58:11 +00001440/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001441int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1442 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001443{
aliguorib4051332008-11-18 20:14:20 +00001444 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001445 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001446
aliguorib4051332008-11-18 20:14:20 +00001447 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1448 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1449 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1450 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1451 return -EINVAL;
1452 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001453 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001454
aliguoria1d1bb32008-11-18 20:07:32 +00001455 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001456 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001457 wp->flags = flags;
1458
aliguori2dc9f412008-11-18 20:56:59 +00001459 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001460 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001461 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001462 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001463 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001464
pbrook6658ffb2007-03-16 23:58:11 +00001465 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001466
1467 if (watchpoint)
1468 *watchpoint = wp;
1469 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001470}
1471
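/* Usage sketch (illustrative): insert a 4-byte GDB write watchpoint and
   drop it again by reference. Per the sanity checks above, len must be a
   power of two (1/2/4/8) and addr aligned to it; BP_MEM_WRITE is assumed
   from the surrounding headers alongside the BP_MEM_READ used below.

   CPUWatchpoint *wp;
   if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp) == 0) {
       // ... run ...
       cpu_watchpoint_remove_by_ref(env, wp);
   }
*/
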
aliguoria1d1bb32008-11-18 20:07:32 +00001472/* Remove a specific watchpoint. */
1473int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1474 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001475{
aliguorib4051332008-11-18 20:14:20 +00001476 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001477 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001478
Blue Swirl72cf2d42009-09-12 07:36:22 +00001479 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001480 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001481 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001482 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001483 return 0;
1484 }
1485 }
aliguoria1d1bb32008-11-18 20:07:32 +00001486 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001487}
1488
aliguoria1d1bb32008-11-18 20:07:32 +00001489/* Remove a specific watchpoint by reference. */
1490void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1491{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001492 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001493
aliguoria1d1bb32008-11-18 20:07:32 +00001494 tlb_flush_page(env, watchpoint->vaddr);
1495
Anthony Liguori7267c092011-08-20 22:09:37 -05001496 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001497}
1498
aliguoria1d1bb32008-11-18 20:07:32 +00001499/* Remove all matching watchpoints. */
1500void cpu_watchpoint_remove_all(CPUState *env, int mask)
1501{
aliguoric0ce9982008-11-25 22:13:57 +00001502 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001503
Blue Swirl72cf2d42009-09-12 07:36:22 +00001504 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001505 if (wp->flags & mask)
1506 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001507 }
aliguoria1d1bb32008-11-18 20:07:32 +00001508}
Paul Brookc527ee82010-03-01 03:31:14 +00001509#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001510
1511/* Add a breakpoint. */
1512int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1513 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001514{
bellard1fddef42005-04-17 19:16:13 +00001515#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001516 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001517
Anthony Liguori7267c092011-08-20 22:09:37 -05001518 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001519
1520 bp->pc = pc;
1521 bp->flags = flags;
1522
aliguori2dc9f412008-11-18 20:56:59 +00001523 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001524 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001525 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001526 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001527 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001528
1529 breakpoint_invalidate(env, pc);
1530
1531 if (breakpoint)
1532 *breakpoint = bp;
1533 return 0;
1534#else
1535 return -ENOSYS;
1536#endif
1537}
1538
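/* Illustrative pairing (hypothetical gdbstub-style caller): breakpoints
   are matched on (pc, flags), so removal mirrors insertion:

   cpu_breakpoint_insert(env, pc, BP_GDB, NULL);
   // ... later ...
   cpu_breakpoint_remove(env, pc, BP_GDB);
*/
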
1539/* Remove a specific breakpoint. */
1540int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1541{
1542#if defined(TARGET_HAS_ICE)
1543 CPUBreakpoint *bp;
1544
Blue Swirl72cf2d42009-09-12 07:36:22 +00001545 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001546 if (bp->pc == pc && bp->flags == flags) {
1547 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001548 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001549 }
bellard4c3a88a2003-07-26 12:06:08 +00001550 }
aliguoria1d1bb32008-11-18 20:07:32 +00001551 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001552#else
aliguoria1d1bb32008-11-18 20:07:32 +00001553 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001554#endif
1555}
1556
aliguoria1d1bb32008-11-18 20:07:32 +00001557/* Remove a specific breakpoint by reference. */
1558void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001559{
bellard1fddef42005-04-17 19:16:13 +00001560#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001561 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001562
aliguoria1d1bb32008-11-18 20:07:32 +00001563 breakpoint_invalidate(env, breakpoint->pc);
1564
Anthony Liguori7267c092011-08-20 22:09:37 -05001565 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001566#endif
1567}
1568
1569/* Remove all matching breakpoints. */
1570void cpu_breakpoint_remove_all(CPUState *env, int mask)
1571{
1572#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001573 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001574
Blue Swirl72cf2d42009-09-12 07:36:22 +00001575 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001576 if (bp->flags & mask)
1577 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001578 }
bellard4c3a88a2003-07-26 12:06:08 +00001579#endif
1580}
1581
bellardc33a3462003-07-29 20:50:33 +00001582/* enable or disable single step mode. EXCP_DEBUG is returned by the
1583 CPU loop after each instruction */
1584void cpu_single_step(CPUState *env, int enabled)
1585{
bellard1fddef42005-04-17 19:16:13 +00001586#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001587 if (env->singlestep_enabled != enabled) {
1588 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001589 if (kvm_enabled())
1590 kvm_update_guest_debug(env, 0);
1591 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001592 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001593 /* XXX: only flush what is necessary */
1594 tb_flush(env);
1595 }
bellardc33a3462003-07-29 20:50:33 +00001596 }
1597#endif
1598}
1599
bellard34865132003-10-05 14:28:56 +00001600/* enable or disable low-level logging */
1601void cpu_set_log(int log_flags)
1602{
1603 loglevel = log_flags;
1604 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001605 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001606 if (!logfile) {
1607 perror(logfilename);
1608 _exit(1);
1609 }
bellard9fa3e852004-01-04 18:06:42 +00001610#if !defined(CONFIG_SOFTMMU)
1611 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1612 {
blueswir1b55266b2008-09-20 08:07:15 +00001613 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001614 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1615 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001616#elif defined(_WIN32)
1617 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1618 setvbuf(logfile, NULL, _IONBF, 0);
1619#else
bellard34865132003-10-05 14:28:56 +00001620 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001621#endif
pbrooke735b912007-06-30 13:53:24 +00001622 log_append = 1;
1623 }
1624 if (!loglevel && logfile) {
1625 fclose(logfile);
1626 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001627 }
1628}
1629
1630void cpu_set_log_filename(const char *filename)
1631{
1632 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001633 if (logfile) {
1634 fclose(logfile);
1635 logfile = NULL;
1636 }
1637 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001638}
bellardc33a3462003-07-29 20:50:33 +00001639
aurel323098dba2009-03-07 21:28:24 +00001640static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001641{
pbrookd5975362008-06-07 20:50:51 +00001642 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1643 problem and hope the cpu will stop of its own accord. For userspace
1644 emulation this often isn't actually as bad as it sounds. Often
1645 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001646 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001647 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001648
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001649 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001650 tb = env->current_tb;
1651 /* if the cpu is currently executing code, we must unlink it and
1652       all the potentially executing TBs */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001653 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001654 env->current_tb = NULL;
1655 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001656 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001657 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001658}
1659
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001660#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001661/* mask must never be zero, except for A20 change call */
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001662static void tcg_handle_interrupt(CPUState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001663{
1664 int old_mask;
1665
1666 old_mask = env->interrupt_request;
1667 env->interrupt_request |= mask;
1668
aliguori8edac962009-04-24 18:03:45 +00001669 /*
1670 * If called from iothread context, wake the target cpu in
1671     * case it's halted.
1672 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001673 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001674 qemu_cpu_kick(env);
1675 return;
1676 }
aliguori8edac962009-04-24 18:03:45 +00001677
pbrook2e70f6e2008-06-29 01:03:05 +00001678 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001679 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001680 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001681 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001682 cpu_abort(env, "Raised interrupt while not in I/O function");
1683 }
pbrook2e70f6e2008-06-29 01:03:05 +00001684 } else {
aurel323098dba2009-03-07 21:28:24 +00001685 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001686 }
1687}
1688
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001689CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1690
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001691#else /* CONFIG_USER_ONLY */
1692
1693void cpu_interrupt(CPUState *env, int mask)
1694{
1695 env->interrupt_request |= mask;
1696 cpu_unlink_tb(env);
1697}
1698#endif /* CONFIG_USER_ONLY */
1699
bellardb54ad042004-05-20 13:42:52 +00001700void cpu_reset_interrupt(CPUState *env, int mask)
1701{
1702 env->interrupt_request &= ~mask;
1703}
1704
aurel323098dba2009-03-07 21:28:24 +00001705void cpu_exit(CPUState *env)
1706{
1707 env->exit_request = 1;
1708 cpu_unlink_tb(env);
1709}
1710
blueswir1c7cd6a32008-10-02 18:27:46 +00001711const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001712 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001713 "show generated host assembly code for each compiled TB" },
1714 { CPU_LOG_TB_IN_ASM, "in_asm",
1715 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001716 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001717 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001718 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001719 "show micro ops "
1720#ifdef TARGET_I386
1721 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001722#endif
blueswir1e01a1152008-03-14 17:37:11 +00001723 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001724 { CPU_LOG_INT, "int",
1725 "show interrupts/exceptions in short format" },
1726 { CPU_LOG_EXEC, "exec",
1727 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001728 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001729 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001730#ifdef TARGET_I386
1731 { CPU_LOG_PCALL, "pcall",
1732 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001733 { CPU_LOG_RESET, "cpu_reset",
1734 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001735#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001736#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001737 { CPU_LOG_IOPORT, "ioport",
1738 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001739#endif
bellardf193c792004-03-21 17:06:25 +00001740 { 0, NULL, NULL },
1741};
1742
1743static int cmp1(const char *s1, int n, const char *s2)
1744{
1745 if (strlen(s2) != n)
1746 return 0;
1747 return memcmp(s1, s2, n) == 0;
1748}
ths3b46e622007-09-17 08:09:54 +00001749
bellardf193c792004-03-21 17:06:25 +00001750/* takes a comma-separated list of log masks. Returns 0 on error. */
1751int cpu_str_to_log_mask(const char *str)
1752{
blueswir1c7cd6a32008-10-02 18:27:46 +00001753 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001754 int mask;
1755 const char *p, *p1;
1756
1757 p = str;
1758 mask = 0;
1759 for(;;) {
1760 p1 = strchr(p, ',');
1761 if (!p1)
1762 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001763        if (cmp1(p, p1 - p, "all")) {
1764 for(item = cpu_log_items; item->mask != 0; item++) {
1765 mask |= item->mask;
1766 }
1767 } else {
1768 for(item = cpu_log_items; item->mask != 0; item++) {
1769 if (cmp1(p, p1 - p, item->name))
1770 goto found;
1771 }
1772 return 0;
bellardf193c792004-03-21 17:06:25 +00001773 }
bellardf193c792004-03-21 17:06:25 +00001774 found:
1775 mask |= item->mask;
1776 if (*p1 != ',')
1777 break;
1778 p = p1 + 1;
1779 }
1780 return mask;
1781}
bellardea041c02003-06-25 16:16:50 +00001782
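/* Example (illustrative): using the item names from cpu_log_items above,
   the mask for "in_asm,exec" would be built as

   int mask = cpu_str_to_log_mask("in_asm,exec");
   // mask == (CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC); 0 signals a parse error
   cpu_set_log(mask);
*/
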
bellard75012672003-06-21 13:11:07 +00001783void cpu_abort(CPUState *env, const char *fmt, ...)
1784{
1785 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001786 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001787
1788 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001789 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001790 fprintf(stderr, "qemu: fatal: ");
1791 vfprintf(stderr, fmt, ap);
1792 fprintf(stderr, "\n");
1793#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001794 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1795#else
1796 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001797#endif
aliguori93fcfe32009-01-15 22:34:14 +00001798 if (qemu_log_enabled()) {
1799 qemu_log("qemu: fatal: ");
1800 qemu_log_vprintf(fmt, ap2);
1801 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001802#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001803 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001804#else
aliguori93fcfe32009-01-15 22:34:14 +00001805 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001806#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001807 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001808 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001809 }
pbrook493ae1f2007-11-23 16:53:59 +00001810 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001811 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001812#if defined(CONFIG_USER_ONLY)
1813 {
1814 struct sigaction act;
1815 sigfillset(&act.sa_mask);
1816 act.sa_handler = SIG_DFL;
1817 sigaction(SIGABRT, &act, NULL);
1818 }
1819#endif
bellard75012672003-06-21 13:11:07 +00001820 abort();
1821}
1822
thsc5be9f02007-02-28 20:20:53 +00001823CPUState *cpu_copy(CPUState *env)
1824{
ths01ba9812007-12-09 02:22:57 +00001825 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001826 CPUState *next_cpu = new_env->next_cpu;
1827 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001828#if defined(TARGET_HAS_ICE)
1829 CPUBreakpoint *bp;
1830 CPUWatchpoint *wp;
1831#endif
1832
thsc5be9f02007-02-28 20:20:53 +00001833 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001834
1835 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001836 new_env->next_cpu = next_cpu;
1837 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001838
1839 /* Clone all break/watchpoints.
1840 Note: Once we support ptrace with hw-debug register access, make sure
1841 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001842 QTAILQ_INIT(&env->breakpoints);
1843 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001844#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001845 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001846 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1847 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001848 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001849 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1850 wp->flags, NULL);
1851 }
1852#endif
1853
thsc5be9f02007-02-28 20:20:53 +00001854 return new_env;
1855}
1856
bellard01243112004-01-04 15:48:17 +00001857#if !defined(CONFIG_USER_ONLY)
1858
edgar_igl5c751e92008-05-06 08:44:21 +00001859static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1860{
1861 unsigned int i;
1862
1863 /* Discard jump cache entries for any tb which might potentially
1864 overlap the flushed page. */
1865 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1866    memset(&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001867 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001868
1869 i = tb_jmp_cache_hash_page(addr);
1870    memset(&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001871 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001872}
1873
Igor Kovalenko08738982009-07-12 02:15:40 +04001874static CPUTLBEntry s_cputlb_empty_entry = {
1875 .addr_read = -1,
1876 .addr_write = -1,
1877 .addr_code = -1,
1878 .addend = -1,
1879};
1880
bellardee8b7022004-02-03 23:35:10 +00001881/* NOTE: if flush_global is true, also flush global entries (not
1882 implemented yet) */
1883void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001884{
bellard33417e72003-08-10 21:47:01 +00001885 int i;
bellard01243112004-01-04 15:48:17 +00001886
bellard9fa3e852004-01-04 18:06:42 +00001887#if defined(DEBUG_TLB)
1888 printf("tlb_flush:\n");
1889#endif
bellard01243112004-01-04 15:48:17 +00001890 /* must reset current TB so that interrupts cannot modify the
1891 links while we are modifying them */
1892 env->current_tb = NULL;
1893
bellard33417e72003-08-10 21:47:01 +00001894 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001895 int mmu_idx;
1896 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001897 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001898 }
bellard33417e72003-08-10 21:47:01 +00001899 }
bellard9fa3e852004-01-04 18:06:42 +00001900
bellard8a40a182005-11-20 10:35:40 +00001901 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001902
Paul Brookd4c430a2010-03-17 02:14:28 +00001903 env->tlb_flush_addr = -1;
1904 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00001905 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001906}
1907
bellard274da6b2004-05-20 21:56:27 +00001908static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001909{
ths5fafdf22007-09-16 21:08:06 +00001910 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001911 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001912 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001913 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001914 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001915 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001916 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001917 }
bellard61382a52003-10-27 21:22:23 +00001918}
1919
bellard2e126692004-04-25 21:28:44 +00001920void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001921{
bellard8a40a182005-11-20 10:35:40 +00001922 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001923 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001924
bellard9fa3e852004-01-04 18:06:42 +00001925#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001926 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001927#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00001928 /* Check if we need to flush due to large pages. */
1929 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1930#if defined(DEBUG_TLB)
1931 printf("tlb_flush_page: forced full flush ("
1932 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1933 env->tlb_flush_addr, env->tlb_flush_mask);
1934#endif
1935 tlb_flush(env, 1);
1936 return;
1937 }
bellard01243112004-01-04 15:48:17 +00001938 /* must reset current TB so that interrupts cannot modify the
1939 links while we are modifying them */
1940 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00001941
bellard61382a52003-10-27 21:22:23 +00001942 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00001943 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001944 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1945 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00001946
edgar_igl5c751e92008-05-06 08:44:21 +00001947 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00001948}
1949
bellard9fa3e852004-01-04 18:06:42 +00001950/* update the TLBs so that writes to code in the virtual page 'addr'
1951 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05001952static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00001953{
ths5fafdf22007-09-16 21:08:06 +00001954 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00001955 ram_addr + TARGET_PAGE_SIZE,
1956 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00001957}
1958
bellard9fa3e852004-01-04 18:06:42 +00001959/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00001960 tested for self modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05001961static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00001962 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00001963{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001964 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00001965}
1966
ths5fafdf22007-09-16 21:08:06 +00001967static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00001968 unsigned long start, unsigned long length)
1969{
1970 unsigned long addr;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001971 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
bellard84b7b8e2005-11-28 21:19:04 +00001972 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00001973 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00001974 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00001975 }
1976 }
1977}
1978
pbrook5579c7f2009-04-11 14:47:08 +00001979/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05001980void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00001981 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00001982{
1983 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00001984 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001985 int i;
bellard1ccde1c2004-02-06 19:46:14 +00001986
1987 start &= TARGET_PAGE_MASK;
1988 end = TARGET_PAGE_ALIGN(end);
1989
1990 length = end - start;
1991 if (length == 0)
1992 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001993 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001994
bellard1ccde1c2004-02-06 19:46:14 +00001995 /* we modify the TLB cache so that the dirty bit will be set again
1996 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001997 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02001998 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00001999 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002000 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00002001 != (end - 1) - start) {
2002 abort();
2003 }
2004
bellard6a00d602005-11-21 23:25:50 +00002005 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002006 int mmu_idx;
2007 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2008 for(i = 0; i < CPU_TLB_SIZE; i++)
2009 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2010 start1, length);
2011 }
bellard6a00d602005-11-21 23:25:50 +00002012 }
bellard1ccde1c2004-02-06 19:46:14 +00002013}
2014
aliguori74576192008-10-06 14:02:03 +00002015int cpu_physical_memory_set_dirty_tracking(int enable)
2016{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002017 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002018 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002019 return ret;
aliguori74576192008-10-06 14:02:03 +00002020}
2021
bellard3a7d9292005-08-21 09:26:42 +00002022static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2023{
Anthony Liguoric227f092009-10-01 16:12:16 -05002024 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002025 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002026
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002027 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
pbrook5579c7f2009-04-11 14:47:08 +00002028 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2029 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002030 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002031 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002032 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002033 }
2034 }
2035}
2036
2037/* update the TLB according to the current state of the dirty bits */
2038void cpu_tlb_update_dirty(CPUState *env)
2039{
2040 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002041 int mmu_idx;
2042 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2043 for(i = 0; i < CPU_TLB_SIZE; i++)
2044 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2045 }
bellard3a7d9292005-08-21 09:26:42 +00002046}
2047
pbrook0f459d12008-06-09 00:20:13 +00002048static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002049{
pbrook0f459d12008-06-09 00:20:13 +00002050 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2051 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002052}
2053
pbrook0f459d12008-06-09 00:20:13 +00002054/* update the TLB corresponding to virtual page vaddr
2055 so that it is no longer dirty */
2056static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002057{
bellard1ccde1c2004-02-06 19:46:14 +00002058 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002059 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002060
pbrook0f459d12008-06-09 00:20:13 +00002061 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002062 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002063 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2064 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002065}
2066
Paul Brookd4c430a2010-03-17 02:14:28 +00002067/* Our TLB does not support large pages, so remember the area covered by
2068 large pages and trigger a full TLB flush if these are invalidated. */
2069static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2070 target_ulong size)
2071{
2072 target_ulong mask = ~(size - 1);
2073
2074 if (env->tlb_flush_addr == (target_ulong)-1) {
2075 env->tlb_flush_addr = vaddr & mask;
2076 env->tlb_flush_mask = mask;
2077 return;
2078 }
2079 /* Extend the existing region to include the new page.
2080 This is a compromise between unnecessary flushes and the cost
2081 of maintaining a full variable size TLB. */
2082 mask &= env->tlb_flush_mask;
2083 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2084 mask <<= 1;
2085 }
2086 env->tlb_flush_addr &= mask;
2087 env->tlb_flush_mask = mask;
2088}
2089
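/* Worked example (illustrative): with an existing 1 MB large-page region at
   tlb_flush_addr = 0x00100000 (tlb_flush_mask = 0xFFF00000), adding a page
   at vaddr = 0x00280000 widens the mask until both addresses agree:
   0xFFF00000 -> 0xFFE00000 -> 0xFFC00000, i.e. a single 4 MB region starting
   at 0x00000000 is now tracked and forces a full flush if invalidated. */
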
Avi Kivity1d393fa2012-01-01 21:15:42 +02002090static bool is_ram_rom(ram_addr_t pd)
2091{
2092 pd &= ~TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002093 return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002094}
2095
2096static bool is_ram_rom_romd(ram_addr_t pd)
2097{
2098 return is_ram_rom(pd) || (pd & IO_MEM_ROMD);
2099}
2100
Paul Brookd4c430a2010-03-17 02:14:28 +00002101/* Add a new TLB entry. At most one entry for a given virtual address
2102 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2103 supplied size is only used by tlb_flush_page. */
2104void tlb_set_page(CPUState *env, target_ulong vaddr,
2105 target_phys_addr_t paddr, int prot,
2106 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002107{
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002108 PhysPageDesc p;
bellard4f2ac232004-04-26 19:44:02 +00002109 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00002110 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002111 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002112 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002113 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002114 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002115 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002116 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002117
Paul Brookd4c430a2010-03-17 02:14:28 +00002118 assert(size >= TARGET_PAGE_SIZE);
2119 if (size != TARGET_PAGE_SIZE) {
2120 tlb_add_large_page(env, vaddr, size);
2121 }
bellard92e873b2004-05-21 14:52:29 +00002122 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002123 pd = p.phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00002124#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002125 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2126 " prot=%x idx=%d pd=0x%08lx\n",
2127 vaddr, paddr, prot, mmu_idx, pd);
bellard9fa3e852004-01-04 18:06:42 +00002128#endif
2129
pbrook0f459d12008-06-09 00:20:13 +00002130 address = vaddr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002131 if (!is_ram_rom_romd(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002132 /* IO memory case (romd handled later) */
2133 address |= TLB_MMIO;
2134 }
pbrook5579c7f2009-04-11 14:47:08 +00002135 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002136 if (is_ram_rom(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002137 /* Normal RAM. */
2138 iotlb = pd & TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002139 if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2140 iotlb |= io_mem_notdirty.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002141 else
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002142 iotlb |= io_mem_rom.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002143 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002144 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002145 It would be nice to pass an offset from the base address
2146 of that region. This would avoid having to special case RAM,
2147 and avoid full address decoding in every device.
2148 We can't use the high bits of pd for this because
2149 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002150 iotlb = (pd & ~TARGET_PAGE_MASK);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002151 iotlb += p.region_offset;
pbrook0f459d12008-06-09 00:20:13 +00002152 }
pbrook6658ffb2007-03-16 23:58:11 +00002153
pbrook0f459d12008-06-09 00:20:13 +00002154 code_address = address;
2155 /* Make accesses to pages with watchpoints go via the
2156 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002157 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002158 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002159 /* Avoid trapping reads of pages with a write breakpoint. */
2160 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2161 iotlb = io_mem_watch + paddr;
2162 address |= TLB_MMIO;
2163 break;
2164 }
pbrook6658ffb2007-03-16 23:58:11 +00002165 }
pbrook0f459d12008-06-09 00:20:13 +00002166 }
balrogd79acba2007-06-26 20:01:13 +00002167
pbrook0f459d12008-06-09 00:20:13 +00002168 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2169 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2170 te = &env->tlb_table[mmu_idx][index];
2171 te->addend = addend - vaddr;
2172 if (prot & PAGE_READ) {
2173 te->addr_read = address;
2174 } else {
2175 te->addr_read = -1;
2176 }
edgar_igl5c751e92008-05-06 08:44:21 +00002177
pbrook0f459d12008-06-09 00:20:13 +00002178 if (prot & PAGE_EXEC) {
2179 te->addr_code = code_address;
2180 } else {
2181 te->addr_code = -1;
2182 }
2183 if (prot & PAGE_WRITE) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002184 if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr ||
pbrook0f459d12008-06-09 00:20:13 +00002185 (pd & IO_MEM_ROMD)) {
2186 /* Write access calls the I/O callback. */
2187 te->addr_write = address | TLB_MMIO;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002188 } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
pbrook0f459d12008-06-09 00:20:13 +00002189 !cpu_physical_memory_is_dirty(pd)) {
2190 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002191 } else {
pbrook0f459d12008-06-09 00:20:13 +00002192 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002193 }
pbrook0f459d12008-06-09 00:20:13 +00002194 } else {
2195 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002196 }
bellard9fa3e852004-01-04 18:06:42 +00002197}
2198
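/* Worked example (illustrative, assuming TARGET_PAGE_BITS == 12 and
   CPU_TLB_SIZE == 256): vaddr 0x12345678 selects slot
   (0x12345678 >> 12) & 255 == 0x45, and since te->addend is stored as
   addend - vaddr above, a RAM fast path recovers the host address as

   uint8_t *host = (uint8_t *)(long)(vaddr +
                       env->tlb_table[mmu_idx][0x45].addend);
*/
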
bellard01243112004-01-04 15:48:17 +00002199#else
2200
bellardee8b7022004-02-03 23:35:10 +00002201void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002202{
2203}
2204
bellard2e126692004-04-25 21:28:44 +00002205void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002206{
2207}
2208
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002209/*
2210 * Walks guest process memory "regions" one by one
2211 * and calls callback function 'fn' for each region.
2212 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002213
2214struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002215{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002216 walk_memory_regions_fn fn;
2217 void *priv;
2218 unsigned long start;
2219 int prot;
2220};
bellard9fa3e852004-01-04 18:06:42 +00002221
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002222static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002223 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002224{
2225 if (data->start != -1ul) {
2226 int rc = data->fn(data->priv, data->start, end, data->prot);
2227 if (rc != 0) {
2228 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002229 }
bellard33417e72003-08-10 21:47:01 +00002230 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002231
2232 data->start = (new_prot ? end : -1ul);
2233 data->prot = new_prot;
2234
2235 return 0;
2236}
2237
2238static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002239 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002240{
Paul Brookb480d9b2010-03-12 23:23:29 +00002241 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002242 int i, rc;
2243
2244 if (*lp == NULL) {
2245 return walk_memory_regions_end(data, base, 0);
2246 }
2247
2248 if (level == 0) {
2249 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002250 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002251 int prot = pd[i].flags;
2252
2253 pa = base | (i << TARGET_PAGE_BITS);
2254 if (prot != data->prot) {
2255 rc = walk_memory_regions_end(data, pa, prot);
2256 if (rc != 0) {
2257 return rc;
2258 }
2259 }
2260 }
2261 } else {
2262 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002263 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002264 pa = base | ((abi_ulong)i <<
2265 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002266 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2267 if (rc != 0) {
2268 return rc;
2269 }
2270 }
2271 }
2272
2273 return 0;
2274}
2275
2276int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2277{
2278 struct walk_memory_regions_data data;
2279 unsigned long i;
2280
2281 data.fn = fn;
2282 data.priv = priv;
2283 data.start = -1ul;
2284 data.prot = 0;
2285
2286 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002287 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002288 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2289 if (rc != 0) {
2290 return rc;
2291 }
2292 }
2293
2294 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002295}
2296
Paul Brookb480d9b2010-03-12 23:23:29 +00002297static int dump_region(void *priv, abi_ulong start,
2298 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002299{
2300 FILE *f = (FILE *)priv;
2301
Paul Brookb480d9b2010-03-12 23:23:29 +00002302 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2303 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002304 start, end, end - start,
2305 ((prot & PAGE_READ) ? 'r' : '-'),
2306 ((prot & PAGE_WRITE) ? 'w' : '-'),
2307 ((prot & PAGE_EXEC) ? 'x' : '-'));
2308
2309 return (0);
2310}
2311
2312/* dump memory mappings */
2313void page_dump(FILE *f)
2314{
2315 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2316 "start", "end", "size", "prot");
2317 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002318}
2319
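/* Example output (illustrative, 32-bit guest; format follows dump_region
   above):

   start    end      size     prot
   00008000-00009000 00001000 r-x
   40000000-40020000 00020000 rw-
*/
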
pbrook53a59602006-03-25 19:31:22 +00002320int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002321{
bellard9fa3e852004-01-04 18:06:42 +00002322 PageDesc *p;
2323
2324 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002325 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002326 return 0;
2327 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002328}
2329
Richard Henderson376a7902010-03-10 15:57:04 -08002330/* Modify the flags of a page and invalidate the code if necessary.
2331   The flag PAGE_WRITE_ORG is set automatically depending
2332 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002333void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002334{
Richard Henderson376a7902010-03-10 15:57:04 -08002335 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002336
Richard Henderson376a7902010-03-10 15:57:04 -08002337 /* This function should never be called with addresses outside the
2338 guest address space. If this assert fires, it probably indicates
2339 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002340#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2341 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002342#endif
2343 assert(start < end);
2344
bellard9fa3e852004-01-04 18:06:42 +00002345 start = start & TARGET_PAGE_MASK;
2346 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002347
2348 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002349 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002350 }
2351
2352 for (addr = start, len = end - start;
2353 len != 0;
2354 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2355 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2356
2357 /* If the write protection bit is set, then we invalidate
2358 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002359 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002360 (flags & PAGE_WRITE) &&
2361 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002362 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002363 }
2364 p->flags = flags;
2365 }
bellard9fa3e852004-01-04 18:06:42 +00002366}
2367
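/* Usage sketch (hypothetical caller, e.g. user-mode mmap emulation; as the
   comment above notes, mmap_lock must already be held):

   page_set_flags(start, start + len,
                  PAGE_VALID | PAGE_READ | PAGE_WRITE);
*/
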
ths3d97b402007-11-02 19:02:07 +00002368int page_check_range(target_ulong start, target_ulong len, int flags)
2369{
2370 PageDesc *p;
2371 target_ulong end;
2372 target_ulong addr;
2373
Richard Henderson376a7902010-03-10 15:57:04 -08002374 /* This function should never be called with addresses outside the
2375 guest address space. If this assert fires, it probably indicates
2376 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002377#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2378 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002379#endif
2380
Richard Henderson3e0650a2010-03-29 10:54:42 -07002381 if (len == 0) {
2382 return 0;
2383 }
Richard Henderson376a7902010-03-10 15:57:04 -08002384 if (start + len - 1 < start) {
2385 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002386 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002387 }
balrog55f280c2008-10-28 10:24:11 +00002388
ths3d97b402007-11-02 19:02:07 +00002389    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
2390 start = start & TARGET_PAGE_MASK;
2391
Richard Henderson376a7902010-03-10 15:57:04 -08002392 for (addr = start, len = end - start;
2393 len != 0;
2394 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002395 p = page_find(addr >> TARGET_PAGE_BITS);
2396        if (!p)
2397            return -1;
2398        if (!(p->flags & PAGE_VALID))
2399            return -1;
2400
bellarddae32702007-11-14 10:51:00 +00002401 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002402 return -1;
bellarddae32702007-11-14 10:51:00 +00002403 if (flags & PAGE_WRITE) {
2404 if (!(p->flags & PAGE_WRITE_ORG))
2405 return -1;
2406 /* unprotect the page if it was put read-only because it
2407 contains translated code */
2408 if (!(p->flags & PAGE_WRITE)) {
2409 if (!page_unprotect(addr, 0, NULL))
2410 return -1;
2411 }
2412 return 0;
2413 }
ths3d97b402007-11-02 19:02:07 +00002414 }
2415 return 0;
2416}
2417
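/* Example (illustrative): validate a guest buffer before reading it;
   TARGET_EFAULT is assumed from the user-mode syscall emulation:

   if (page_check_range(guest_addr, size, PAGE_READ) < 0) {
       return -TARGET_EFAULT;
   }
*/
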
bellard9fa3e852004-01-04 18:06:42 +00002418/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002419 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002420int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002421{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002422 unsigned int prot;
2423 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002424 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002425
pbrookc8a706f2008-06-02 16:16:42 +00002426 /* Technically this isn't safe inside a signal handler. However we
2427 know this only ever happens in a synchronous SEGV handler, so in
2428 practice it seems to be ok. */
2429 mmap_lock();
2430
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002431 p = page_find(address >> TARGET_PAGE_BITS);
2432 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002433 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002434 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002435 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002436
bellard9fa3e852004-01-04 18:06:42 +00002437 /* if the page was really writable, then we change its
2438 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002439 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2440 host_start = address & qemu_host_page_mask;
2441 host_end = host_start + qemu_host_page_size;
2442
2443 prot = 0;
2444 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2445 p = page_find(addr >> TARGET_PAGE_BITS);
2446 p->flags |= PAGE_WRITE;
2447 prot |= p->flags;
2448
bellard9fa3e852004-01-04 18:06:42 +00002449 /* and since the content will be modified, we must invalidate
2450 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002451 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002452#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002453 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002454#endif
bellard9fa3e852004-01-04 18:06:42 +00002455 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002456 mprotect((void *)g2h(host_start), qemu_host_page_size,
2457 prot & PAGE_BITS);
2458
2459 mmap_unlock();
2460 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002461 }
pbrookc8a706f2008-06-02 16:16:42 +00002462 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002463 return 0;
2464}
2465
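/* Illustrative call path (assumption: the user-mode SEGV handler; h2g() is
   the inverse of the g2h() used above): the handler first offers the fault
   to page_unprotect(), and only when it returns 0 is the fault forwarded
   to the guest:

   if (page_unprotect(h2g(host_fault_addr), pc, puc)) {
       return 1;   // fault came from our own write protection; just retry
   }
*/
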
bellard6a00d602005-11-21 23:25:50 +00002466static inline void tlb_set_dirty(CPUState *env,
2467 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002468{
2469}
bellard9fa3e852004-01-04 18:06:42 +00002470#endif /* defined(CONFIG_USER_ONLY) */
2471
pbrooke2eef172008-06-08 01:09:01 +00002472#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002473
Paul Brookc04b2b72010-03-01 03:31:14 +00002474#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2475typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002476 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002477 target_phys_addr_t base;
Richard Hendersonf6405242010-04-22 16:47:31 -07002478 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2479 ram_addr_t region_offset[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002480} subpage_t;
2481
Anthony Liguoric227f092009-10-01 16:12:16 -05002482static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2483 ram_addr_t memory, ram_addr_t region_offset);
Richard Hendersonf6405242010-04-22 16:47:31 -07002484static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2485 ram_addr_t orig_memory,
2486 ram_addr_t region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002487#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2488 need_subpage) \
2489 do { \
2490 if (addr > start_addr) \
2491 start_addr2 = 0; \
2492 else { \
2493 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2494 if (start_addr2 > 0) \
2495 need_subpage = 1; \
2496 } \
2497 \
blueswir149e9fba2007-05-30 17:25:06 +00002498 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002499 end_addr2 = TARGET_PAGE_SIZE - 1; \
2500 else { \
2501 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2502 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2503 need_subpage = 1; \
2504 } \
2505 } while (0)
2506
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002507/* register physical memory.
2508 For RAM, 'size' must be a multiple of the target page size.
2509 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002510 io memory page. The address used when calling the IO function is
2511 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002512 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002513 before calculating this offset. This should not be a problem unless
2514 the low bits of start_addr and region_offset differ. */
Avi Kivitydd811242012-01-02 12:17:03 +02002515void cpu_register_physical_memory_log(MemoryRegionSection *section,
2516 bool readable, bool readonly)
bellard33417e72003-08-10 21:47:01 +00002517{
Avi Kivitydd811242012-01-02 12:17:03 +02002518 target_phys_addr_t start_addr = section->offset_within_address_space;
2519 ram_addr_t size = section->size;
2520 ram_addr_t phys_offset = section->mr->ram_addr;
2521 ram_addr_t region_offset = section->offset_within_region;
Anthony Liguoric227f092009-10-01 16:12:16 -05002522 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002523 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002524 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002525 ram_addr_t orig_size = size;
Richard Hendersonf6405242010-04-22 16:47:31 -07002526 subpage_t *subpage;
bellard33417e72003-08-10 21:47:01 +00002527
Avi Kivitydd811242012-01-02 12:17:03 +02002528 if (memory_region_is_ram(section->mr)) {
2529 phys_offset += region_offset;
2530 region_offset = 0;
2531 }
2532
2533 if (!readable) {
2534 phys_offset &= ~TARGET_PAGE_MASK & ~IO_MEM_ROMD;
2535 }
2536
2537 if (readonly) {
2538 phys_offset |= io_mem_rom.ram_addr;
2539 }
2540
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002541 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002542
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002543 if (phys_offset == io_mem_unassigned.ram_addr) {
pbrook67c4d232009-02-23 13:16:07 +00002544 region_offset = start_addr;
2545 }
pbrook8da3ff12008-12-01 18:59:50 +00002546 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002547 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002548 end_addr = start_addr + (target_phys_addr_t)size;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002549
2550 addr = start_addr;
2551 do {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002552 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002553 if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002554 ram_addr_t orig_memory = p->phys_offset;
2555 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002556 int need_subpage = 0;
2557
2558 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2559 need_subpage);
Richard Hendersonf6405242010-04-22 16:47:31 -07002560 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002561 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2562 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002563 &p->phys_offset, orig_memory,
2564 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002565 } else {
2566 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2567 >> IO_MEM_SHIFT];
2568 }
pbrook8da3ff12008-12-01 18:59:50 +00002569 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2570 region_offset);
2571 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002572 } else {
2573 p->phys_offset = phys_offset;
Avi Kivity2774c6d2012-01-01 18:24:24 +02002574 p->region_offset = region_offset;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002575 if (is_ram_rom_romd(phys_offset))
blueswir1db7b5422007-05-26 17:36:03 +00002576 phys_offset += TARGET_PAGE_SIZE;
2577 }
2578 } else {
2579 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2580 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002581 p->region_offset = region_offset;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002582 if (is_ram_rom_romd(phys_offset)) {
blueswir1db7b5422007-05-26 17:36:03 +00002583 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002584 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002585 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002586 int need_subpage = 0;
2587
2588 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2589 end_addr2, need_subpage);
2590
Richard Hendersonf6405242010-04-22 16:47:31 -07002591 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002592 subpage = subpage_init((addr & TARGET_PAGE_MASK),
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002593 &p->phys_offset,
2594 io_mem_unassigned.ram_addr,
pbrook67c4d232009-02-23 13:16:07 +00002595 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002596 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002597 phys_offset, region_offset);
2598 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002599 }
2600 }
2601 }
pbrook8da3ff12008-12-01 18:59:50 +00002602 region_offset += TARGET_PAGE_SIZE;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002603 addr += TARGET_PAGE_SIZE;
2604 } while (addr != end_addr);
ths3b46e622007-09-17 08:09:54 +00002605
bellard9d420372006-06-25 22:25:22 +00002606 /* since each CPU stores ram addresses in its TLB cache, we must
2607 reset the modified entries */
2608 /* XXX: slow ! */
2609 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2610 tlb_flush(env, 1);
2611 }
bellard33417e72003-08-10 21:47:01 +00002612}
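/* A minimal registration sketch. In-tree callers normally reach this
 * through the memory API rather than calling it directly; "my_region"
 * below is a hypothetical, already-initialized RAM MemoryRegion. */
#if 0
static void example_register_ram(MemoryRegion *my_region)
{
    MemoryRegionSection section = {
        .mr = my_region,
        .offset_within_address_space = 0x100000, /* guest-physical base */
        .offset_within_region = 0,
        .size = 0x10000,            /* multiple of the target page size */
    };

    cpu_register_physical_memory_log(&section, true, false);
}
#endif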
2613
Anthony Liguoric227f092009-10-01 16:12:16 -05002614void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002615{
2616 if (kvm_enabled())
2617 kvm_coalesce_mmio_region(addr, size);
2618}
2619
Anthony Liguoric227f092009-10-01 16:12:16 -05002620void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002621{
2622 if (kvm_enabled())
2623 kvm_uncoalesce_mmio_region(addr, size);
2624}
2625
Sheng Yang62a27442010-01-26 19:21:16 +08002626void qemu_flush_coalesced_mmio_buffer(void)
2627{
2628 if (kvm_enabled())
2629 kvm_flush_coalesced_mmio_buffer();
2630}
2631
Marcelo Tosattic9027602010-03-01 20:25:08 -03002632#if defined(__linux__) && !defined(TARGET_S390X)
2633
2634#include <sys/vfs.h>
2635
2636#define HUGETLBFS_MAGIC 0x958458f6
2637
2638static long gethugepagesize(const char *path)
2639{
2640 struct statfs fs;
2641 int ret;
2642
2643 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002644 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002645 } while (ret != 0 && errno == EINTR);
2646
2647 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002648 perror(path);
2649 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002650 }
2651
2652 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002653 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002654
2655 return fs.f_bsize;
2656}
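/* Usage sketch for gethugepagesize(); "/dev/hugepages" is a
 * hypothetical hugetlbfs mount point, and on common x86 hosts the
 * returned block size would be 2 MiB. */
#if 0
static void example_hugepagesize(void)
{
    long sz = gethugepagesize("/dev/hugepages");
    if (sz) {
        fprintf(stderr, "huge page size: %ld bytes\n", sz);
    }
}
#endif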
2657
Alex Williamson04b16652010-07-02 11:13:17 -06002658static void *file_ram_alloc(RAMBlock *block,
2659 ram_addr_t memory,
2660 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002661{
2662 char *filename;
2663 void *area;
2664 int fd;
2665#ifdef MAP_POPULATE
2666 int flags;
2667#endif
2668 unsigned long hpagesize;
2669
2670 hpagesize = gethugepagesize(path);
2671 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002672 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002673 }
2674
2675 if (memory < hpagesize) {
2676 return NULL;
2677 }
2678
2679 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2680 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2681 return NULL;
2682 }
2683
2684 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002685 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002686 }
2687
2688 fd = mkstemp(filename);
2689 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002690 perror("unable to create backing store for hugepages");
2691 free(filename);
2692 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002693 }
2694 unlink(filename);
2695 free(filename);
2696
2697 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2698
2699 /*
2700 * ftruncate is not supported by hugetlbfs in older
2701 * hosts, so don't bother bailing out on errors.
2702 * If anything goes wrong with it under other filesystems,
2703 * mmap will fail.
2704 */
2705 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002706 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002707
2708#ifdef MAP_POPULATE
2709 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2710 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2711 * to sidestep this quirk.
2712 */
2713 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2714 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2715#else
2716 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2717#endif
2718 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002719 perror("file_ram_alloc: can't mmap RAM pages");
2720 close(fd);
2721 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002722 }
Alex Williamson04b16652010-07-02 11:13:17 -06002723 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002724 return area;
2725}
2726#endif
2727
Alex Williamsond17b5282010-06-25 11:08:38 -06002728static ram_addr_t find_ram_offset(ram_addr_t size)
2729{
Alex Williamson04b16652010-07-02 11:13:17 -06002730 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002731 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002732
2733 if (QLIST_EMPTY(&ram_list.blocks))
2734 return 0;
2735
2736 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002737 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002738
2739 end = block->offset + block->length;
2740
2741 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2742 if (next_block->offset >= end) {
2743 next = MIN(next, next_block->offset);
2744 }
2745 }
2746 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002747 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002748 mingap = next - end;
2749 }
2750 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002751
2752 if (offset == RAM_ADDR_MAX) {
2753 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2754 (uint64_t)size);
2755 abort();
2756 }
2757
Alex Williamson04b16652010-07-02 11:13:17 -06002758 return offset;
2759}
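/* A worked example of the best-fit search above (values illustrative):
 * with blocks at [0x0, 0x100000) and [0x300000, 0x400000), a request
 * for 0x100000 bytes sees a 0x200000-byte gap after the first block
 * and a much larger one after the second; the smaller sufficient gap
 * wins, so the new block lands at offset 0x100000. */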
2760
2761static ram_addr_t last_ram_offset(void)
2762{
Alex Williamsond17b5282010-06-25 11:08:38 -06002763 RAMBlock *block;
2764 ram_addr_t last = 0;
2765
2766 QLIST_FOREACH(block, &ram_list.blocks, next)
2767 last = MAX(last, block->offset + block->length);
2768
2769 return last;
2770}
2771
Avi Kivityc5705a72011-12-20 15:59:12 +02002772void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002773{
2774 RAMBlock *new_block, *block;
2775
Avi Kivityc5705a72011-12-20 15:59:12 +02002776 new_block = NULL;
2777 QLIST_FOREACH(block, &ram_list.blocks, next) {
2778 if (block->offset == addr) {
2779 new_block = block;
2780 break;
2781 }
2782 }
2783 assert(new_block);
2784 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002785
2786 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2787 char *id = dev->parent_bus->info->get_dev_path(dev);
2788 if (id) {
2789 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002790 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002791 }
2792 }
2793 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2794
2795 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002796 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002797 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2798 new_block->idstr);
2799 abort();
2800 }
2801 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002802}
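/* Naming sketch: with no device the idstr is just the name, while a
 * qdev with a bus path yields "<bus-path>/<name>". The block name
 * below is hypothetical. */
#if 0
static void example_name_block(ram_addr_t offset)
{
    qemu_ram_set_idstr(offset, "pc.ram", NULL); /* idstr: "pc.ram" */
}
#endif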
2803
2804ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2805 MemoryRegion *mr)
2806{
2807 RAMBlock *new_block;
2808
2809 size = TARGET_PAGE_ALIGN(size);
2810 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002811
Avi Kivity7c637362011-12-21 13:09:49 +02002812 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002813 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002814 if (host) {
2815 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002816 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002817 } else {
2818 if (mem_path) {
2819#if defined (__linux__) && !defined(TARGET_S390X)
2820 new_block->host = file_ram_alloc(new_block, size, mem_path);
2821 if (!new_block->host) {
2822 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002823 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002824 }
2825#else
2826 fprintf(stderr, "-mem-path option unsupported\n");
2827 exit(1);
2828#endif
2829 } else {
2830#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002831 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2832 a system-defined value, which is at least 256GB. Larger systems
2833 have larger values. We put the guest between the end of the data
2834 segment (system break) and this value. We use 32GB as a base to
2835 have enough room for the system break to grow. */
2836 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002837 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002838 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002839 if (new_block->host == MAP_FAILED) {
2840 fprintf(stderr, "Allocating RAM failed\n");
2841 abort();
2842 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002843#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002844 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002845 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002846 } else {
2847 new_block->host = qemu_vmalloc(size);
2848 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002849#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002850 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002851 }
2852 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002853 new_block->length = size;
2854
2855 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2856
Anthony Liguori7267c092011-08-20 22:09:37 -05002857 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002858 last_ram_offset() >> TARGET_PAGE_BITS);
2859 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2860 0xff, size >> TARGET_PAGE_BITS);
2861
2862 if (kvm_enabled())
2863 kvm_setup_guest_memory(new_block->host, size);
2864
2865 return new_block->offset;
2866}
2867
Avi Kivityc5705a72011-12-20 15:59:12 +02002868ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002869{
Avi Kivityc5705a72011-12-20 15:59:12 +02002870 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002871}
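/* A minimal allocation sketch, assuming "mr" is a MemoryRegion the
 * caller has already created for this RAM block: */
#if 0
static ram_addr_t example_alloc_ram(MemoryRegion *mr)
{
    /* 4 MiB of guest RAM; the returned offset names the block. */
    return qemu_ram_alloc(4 * 1024 * 1024, mr);
}
#endif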
bellarde9a1ab12007-02-08 23:08:38 +00002872
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002873void qemu_ram_free_from_ptr(ram_addr_t addr)
2874{
2875 RAMBlock *block;
2876
2877 QLIST_FOREACH(block, &ram_list.blocks, next) {
2878 if (addr == block->offset) {
2879 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002880 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002881 return;
2882 }
2883 }
2884}
2885
Anthony Liguoric227f092009-10-01 16:12:16 -05002886void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002887{
Alex Williamson04b16652010-07-02 11:13:17 -06002888 RAMBlock *block;
2889
2890 QLIST_FOREACH(block, &ram_list.blocks, next) {
2891 if (addr == block->offset) {
2892 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002893 if (block->flags & RAM_PREALLOC_MASK) {
2894 ;
2895 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002896#if defined (__linux__) && !defined(TARGET_S390X)
2897 if (block->fd) {
2898 munmap(block->host, block->length);
2899 close(block->fd);
2900 } else {
2901 qemu_vfree(block->host);
2902 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002903#else
2904 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002905#endif
2906 } else {
2907#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2908 munmap(block->host, block->length);
2909#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002910 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002911 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002912 } else {
2913 qemu_vfree(block->host);
2914 }
Alex Williamson04b16652010-07-02 11:13:17 -06002915#endif
2916 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002917 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002918 return;
2919 }
2920 }
2921
bellarde9a1ab12007-02-08 23:08:38 +00002922}
2923
Huang Yingcd19cfa2011-03-02 08:56:19 +01002924#ifndef _WIN32
2925void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2926{
2927 RAMBlock *block;
2928 ram_addr_t offset;
2929 int flags;
2930 void *area, *vaddr;
2931
2932 QLIST_FOREACH(block, &ram_list.blocks, next) {
2933 offset = addr - block->offset;
2934 if (offset < block->length) {
2935 vaddr = block->host + offset;
2936 if (block->flags & RAM_PREALLOC_MASK) {
2937 ;
2938 } else {
2939 flags = MAP_FIXED;
2940 munmap(vaddr, length);
2941 if (mem_path) {
2942#if defined(__linux__) && !defined(TARGET_S390X)
2943 if (block->fd) {
2944#ifdef MAP_POPULATE
2945 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2946 MAP_PRIVATE;
2947#else
2948 flags |= MAP_PRIVATE;
2949#endif
2950 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2951 flags, block->fd, offset);
2952 } else {
2953 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2954 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2955 flags, -1, 0);
2956 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002957#else
2958 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01002959#endif
2960 } else {
2961#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2962 flags |= MAP_SHARED | MAP_ANONYMOUS;
2963 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2964 flags, -1, 0);
2965#else
2966 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2967 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2968 flags, -1, 0);
2969#endif
2970 }
2971 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002972 fprintf(stderr, "Could not remap addr: "
2973 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01002974 length, addr);
2975 exit(1);
2976 }
2977 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2978 }
2979 return;
2980 }
2981 }
2982}
2983#endif /* !_WIN32 */
2984
pbrookdc828ca2009-04-09 22:21:07 +00002985/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002986 With the exception of the softmmu code in this file, this should
2987 only be used for local memory (e.g. video ram) that the device owns,
2988 and knows it isn't going to access beyond the end of the block.
2989
2990 It should not be used for general purpose DMA.
2991 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2992 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002993void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002994{
pbrook94a6b542009-04-11 17:15:54 +00002995 RAMBlock *block;
2996
Alex Williamsonf471a172010-06-11 11:11:42 -06002997 QLIST_FOREACH(block, &ram_list.blocks, next) {
2998 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05002999 /* Move this entry to the start of the list. */
3000 if (block != QLIST_FIRST(&ram_list.blocks)) {
3001 QLIST_REMOVE(block, next);
3002 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3003 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003004 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003005 /* We need to check if the requested address is in the RAM
3006 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003007 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003008 */
3009 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003010 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003011 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003012 block->host =
3013 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003014 }
3015 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003016 return block->host + (addr - block->offset);
3017 }
pbrook94a6b542009-04-11 17:15:54 +00003018 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003019
3020 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3021 abort();
3022
3023 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003024}
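/* Usage sketch for the rule stated above: a device touching a few
 * bytes of RAM it owns may use qemu_get_ram_ptr(), while DMA-style
 * access belongs in cpu_physical_memory_rw(). "vram_offset" is a
 * hypothetical offset returned earlier by qemu_ram_alloc(). */
#if 0
static void example_clear_vram(ram_addr_t vram_offset)
{
    uint8_t *p = qemu_get_ram_ptr(vram_offset);
    memset(p, 0, 64); /* stays well inside the block the device owns */
}
#endif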
3025
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003026/* Return a host pointer to ram allocated with qemu_ram_alloc.
3027 * Same as qemu_get_ram_ptr but avoids reordering ramblocks.
3028 */
3029void *qemu_safe_ram_ptr(ram_addr_t addr)
3030{
3031 RAMBlock *block;
3032
3033 QLIST_FOREACH(block, &ram_list.blocks, next) {
3034 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003035 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003036 /* We need to check if the requested address is in the RAM
3037 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003038 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003039 */
3040 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003041 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003042 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003043 block->host =
3044 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003045 }
3046 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003047 return block->host + (addr - block->offset);
3048 }
3049 }
3050
3051 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3052 abort();
3053
3054 return NULL;
3055}
3056
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003057/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3058 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003059void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003060{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003061 if (*size == 0) {
3062 return NULL;
3063 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003064 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003065 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003066 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003067 RAMBlock *block;
3068
3069 QLIST_FOREACH(block, &ram_list.blocks, next) {
3070 if (addr - block->offset < block->length) {
3071 if (addr - block->offset + *size > block->length)
3072 *size = block->length - addr + block->offset;
3073 return block->host + (addr - block->offset);
3074 }
3075 }
3076
3077 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3078 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003079 }
3080}
3081
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003082void qemu_put_ram_ptr(void *addr)
3083{
3084 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003085}
3086
Marcelo Tosattie8902612010-10-11 15:31:19 -03003087int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003088{
pbrook94a6b542009-04-11 17:15:54 +00003089 RAMBlock *block;
3090 uint8_t *host = ptr;
3091
Jan Kiszka868bb332011-06-21 22:59:09 +02003092 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003093 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003094 return 0;
3095 }
3096
Alex Williamsonf471a172010-06-11 11:11:42 -06003097 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003098 /* This case can occur when the block is not mapped. */
3099 if (block->host == NULL) {
3100 continue;
3101 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003102 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003103 *ram_addr = block->offset + (host - block->host);
3104 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003105 }
pbrook94a6b542009-04-11 17:15:54 +00003106 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003107
Marcelo Tosattie8902612010-10-11 15:31:19 -03003108 return -1;
3109}
Alex Williamsonf471a172010-06-11 11:11:42 -06003110
Marcelo Tosattie8902612010-10-11 15:31:19 -03003111/* Some of the softmmu routines need to translate from a host pointer
3112 (typically a TLB entry) back to a ram offset. */
3113ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3114{
3115 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003116
Marcelo Tosattie8902612010-10-11 15:31:19 -03003117 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3118 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3119 abort();
3120 }
3121 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003122}
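/* Round-trip sketch: a host pointer obtained from a ram offset maps
 * back to the same offset, assuming addr names a valid RAM block: */
#if 0
static void example_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);
    assert(qemu_ram_addr_from_host_nofail(host) == addr);
}
#endif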
3123
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003124static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3125 unsigned size)
bellard33417e72003-08-10 21:47:01 +00003126{
pbrook67d3b952006-12-18 05:03:52 +00003127#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003128 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003129#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003130#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003131 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003132#endif
3133 return 0;
3134}
3135
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003136static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3137 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00003138{
3139#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003140 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00003141#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003142#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003143 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003144#endif
3145}
3146
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003147static const MemoryRegionOps unassigned_mem_ops = {
3148 .read = unassigned_mem_read,
3149 .write = unassigned_mem_write,
3150 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003151};
3152
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003153static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3154 unsigned size)
3155{
3156 abort();
3157}
3158
3159static void error_mem_write(void *opaque, target_phys_addr_t addr,
3160 uint64_t value, unsigned size)
3161{
3162 abort();
3163}
3164
3165static const MemoryRegionOps error_mem_ops = {
3166 .read = error_mem_read,
3167 .write = error_mem_write,
3168 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003169};
3170
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003171static const MemoryRegionOps rom_mem_ops = {
3172 .read = error_mem_read,
3173 .write = unassigned_mem_write,
3174 .endianness = DEVICE_NATIVE_ENDIAN,
3175};
3176
3177static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3178 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00003179{
bellard3a7d9292005-08-21 09:26:42 +00003180 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003181 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003182 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3183#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003184 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003185 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003186#endif
3187 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003188 switch (size) {
3189 case 1:
3190 stb_p(qemu_get_ram_ptr(ram_addr), val);
3191 break;
3192 case 2:
3193 stw_p(qemu_get_ram_ptr(ram_addr), val);
3194 break;
3195 case 4:
3196 stl_p(qemu_get_ram_ptr(ram_addr), val);
3197 break;
3198 default:
3199 abort();
3200 }
bellardf23db162005-08-21 19:12:28 +00003201 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003202 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003203 /* we remove the notdirty callback only if the code has been
3204 flushed */
3205 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003206 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003207}
3208
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003209static const MemoryRegionOps notdirty_mem_ops = {
3210 .read = error_mem_read,
3211 .write = notdirty_mem_write,
3212 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003213};
3214
pbrook0f459d12008-06-09 00:20:13 +00003215/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003216static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003217{
3218 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003219 target_ulong pc, cs_base;
3220 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003221 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003222 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003223 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003224
aliguori06d55cc2008-11-18 20:24:06 +00003225 if (env->watchpoint_hit) {
3226 /* We re-entered the check after replacing the TB. Now raise
3227 * the debug interrupt so that is will trigger after the
3228 * current instruction. */
3229 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3230 return;
3231 }
pbrook2e70f6e2008-06-29 01:03:05 +00003232 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003233 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003234 if ((vaddr == (wp->vaddr & len_mask) ||
3235 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003236 wp->flags |= BP_WATCHPOINT_HIT;
3237 if (!env->watchpoint_hit) {
3238 env->watchpoint_hit = wp;
3239 tb = tb_find_pc(env->mem_io_pc);
3240 if (!tb) {
3241 cpu_abort(env, "check_watchpoint: could not find TB for "
3242 "pc=%p", (void *)env->mem_io_pc);
3243 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003244 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003245 tb_phys_invalidate(tb, -1);
3246 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3247 env->exception_index = EXCP_DEBUG;
3248 } else {
3249 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3250 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3251 }
3252 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003253 }
aliguori6e140f22008-11-18 20:37:55 +00003254 } else {
3255 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003256 }
3257 }
3258}
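/* Example of the len_mask convention used by the wrappers below
 * (values illustrative): for a 4-byte watchpoint at vaddr 0x1000,
 * wp->len_mask is ~0x3, so a long read of 0x1000 satisfies
 * (vaddr & wp->len_mask) == wp->vaddr and raises BP_MEM_READ. */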
3259
pbrook6658ffb2007-03-16 23:58:11 +00003260/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3261 so these check for a hit then pass through to the normal out-of-line
3262 phys routines. */
Anthony Liguoric227f092009-10-01 16:12:16 -05003263static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003264{
aliguorib4051332008-11-18 20:14:20 +00003265 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003266 return ldub_phys(addr);
3267}
3268
Anthony Liguoric227f092009-10-01 16:12:16 -05003269static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003270{
aliguorib4051332008-11-18 20:14:20 +00003271 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003272 return lduw_phys(addr);
3273}
3274
Anthony Liguoric227f092009-10-01 16:12:16 -05003275static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003276{
aliguorib4051332008-11-18 20:14:20 +00003277 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003278 return ldl_phys(addr);
3279}
3280
Anthony Liguoric227f092009-10-01 16:12:16 -05003281static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003282 uint32_t val)
3283{
aliguorib4051332008-11-18 20:14:20 +00003284 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003285 stb_phys(addr, val);
3286}
3287
Anthony Liguoric227f092009-10-01 16:12:16 -05003288static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003289 uint32_t val)
3290{
aliguorib4051332008-11-18 20:14:20 +00003291 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003292 stw_phys(addr, val);
3293}
3294
Anthony Liguoric227f092009-10-01 16:12:16 -05003295static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003296 uint32_t val)
3297{
aliguorib4051332008-11-18 20:14:20 +00003298 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003299 stl_phys(addr, val);
3300}
3301
Blue Swirld60efc62009-08-25 18:29:31 +00003302static CPUReadMemoryFunc * const watch_mem_read[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00003303 watch_mem_readb,
3304 watch_mem_readw,
3305 watch_mem_readl,
3306};
3307
Blue Swirld60efc62009-08-25 18:29:31 +00003308static CPUWriteMemoryFunc * const watch_mem_write[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00003309 watch_mem_writeb,
3310 watch_mem_writew,
3311 watch_mem_writel,
3312};
pbrook6658ffb2007-03-16 23:58:11 +00003313
Avi Kivity70c68e42012-01-02 12:32:48 +02003314static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3315 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003316{
Avi Kivity70c68e42012-01-02 12:32:48 +02003317 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003318 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003319#if defined(DEBUG_SUBPAGE)
3320 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3321 mmio, len, addr, idx);
3322#endif
blueswir1db7b5422007-05-26 17:36:03 +00003323
Richard Hendersonf6405242010-04-22 16:47:31 -07003324 addr += mmio->region_offset[idx];
3325 idx = mmio->sub_io_index[idx];
Avi Kivity70c68e42012-01-02 12:32:48 +02003326 return io_mem_read(idx, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003327}
3328
Avi Kivity70c68e42012-01-02 12:32:48 +02003329static void subpage_write(void *opaque, target_phys_addr_t addr,
3330 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003331{
Avi Kivity70c68e42012-01-02 12:32:48 +02003332 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003333 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003334#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003335 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3336 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003337 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003338#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003339
3340 addr += mmio->region_offset[idx];
3341 idx = mmio->sub_io_index[idx];
Avi Kivity70c68e42012-01-02 12:32:48 +02003342 io_mem_write(idx, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003343}
3344
Avi Kivity70c68e42012-01-02 12:32:48 +02003345static const MemoryRegionOps subpage_ops = {
3346 .read = subpage_read,
3347 .write = subpage_write,
3348 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003349};
3350
Avi Kivityde712f92012-01-02 12:41:07 +02003351static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3352 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003353{
3354 ram_addr_t raddr = addr;
3355 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003356 switch (size) {
3357 case 1: return ldub_p(ptr);
3358 case 2: return lduw_p(ptr);
3359 case 4: return ldl_p(ptr);
3360 default: abort();
3361 }
Andreas Färber56384e82011-11-30 16:26:21 +01003362}
3363
Avi Kivityde712f92012-01-02 12:41:07 +02003364static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3365 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003366{
3367 ram_addr_t raddr = addr;
3368 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003369 switch (size) {
3370 case 1: return stb_p(ptr, value);
3371 case 2: return stw_p(ptr, value);
3372 case 4: return stl_p(ptr, value);
3373 default: abort();
3374 }
Andreas Färber56384e82011-11-30 16:26:21 +01003375}
3376
Avi Kivityde712f92012-01-02 12:41:07 +02003377static const MemoryRegionOps subpage_ram_ops = {
3378 .read = subpage_ram_read,
3379 .write = subpage_ram_write,
3380 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003381};
3382
Anthony Liguoric227f092009-10-01 16:12:16 -05003383static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3384 ram_addr_t memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003385{
3386 int idx, eidx;
3387
3388 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3389 return -1;
3390 idx = SUBPAGE_IDX(start);
3391 eidx = SUBPAGE_IDX(end);
3392#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003393 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003394 mmio, start, end, idx, eidx, memory);
3395#endif
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003396 if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
Avi Kivityde712f92012-01-02 12:41:07 +02003397 memory = io_mem_subpage_ram.ram_addr;
Andreas Färber56384e82011-11-30 16:26:21 +01003398 }
Richard Hendersonf6405242010-04-22 16:47:31 -07003399 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
blueswir1db7b5422007-05-26 17:36:03 +00003400 for (; idx <= eidx; idx++) {
Richard Hendersonf6405242010-04-22 16:47:31 -07003401 mmio->sub_io_index[idx] = memory;
3402 mmio->region_offset[idx] = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00003403 }
3404
3405 return 0;
3406}
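/* A registration sketch (values illustrative): carving the first half
 * of a 4 KiB page out for one device,
 *
 *     subpage_register(mmio, 0x000, 0x7ff, dev_io_index, 0);
 *
 * points the first 0x800 sub_io_index slots at that device's handlers;
 * "dev_io_index" stands for a hypothetical token previously returned
 * by cpu_register_io_memory(). */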
3407
Richard Hendersonf6405242010-04-22 16:47:31 -07003408static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3409 ram_addr_t orig_memory,
3410 ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003411{
Anthony Liguoric227f092009-10-01 16:12:16 -05003412 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003413 int subpage_memory;
3414
Anthony Liguori7267c092011-08-20 22:09:37 -05003415 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003416
3417 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003418 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3419 "subpage", TARGET_PAGE_SIZE);
3420 subpage_memory = mmio->iomem.ram_addr;
blueswir1db7b5422007-05-26 17:36:03 +00003421#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003422 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3423 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
blueswir1db7b5422007-05-26 17:36:03 +00003424#endif
aliguori1eec6142009-02-05 22:06:18 +00003425 *phys = subpage_memory | IO_MEM_SUBPAGE;
Richard Hendersonf6405242010-04-22 16:47:31 -07003426 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00003427
3428 return mmio;
3429}
3430
aliguori88715652009-02-11 15:20:58 +00003431static int get_free_io_mem_idx(void)
3432{
3433 int i;
3434
3435 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3436 if (!io_mem_used[i]) {
3437 io_mem_used[i] = 1;
3438 return i;
3439 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003440 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003441 return -1;
3442}
3443
bellard33417e72003-08-10 21:47:01 +00003444/* mem_read and mem_write are arrays of functions containing the
3445 function to access byte (index 0), word (index 1) and dword (index
Paul Brook0b4e6e32009-04-30 18:37:55 +01003446 2). All six functions must be supplied (the asserts below reject NULL).
blueswir13ee89922008-01-02 19:45:26 +00003447 If io_index is non-zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00003448 modified. If it is zero, a new io zone is allocated. The return
3449 value can be used with cpu_register_physical_memory(). (-1) is
3450 returned if error. */
Avi Kivity1eed09c2009-06-14 11:38:51 +03003451static int cpu_register_io_memory_fixed(int io_index,
Blue Swirld60efc62009-08-25 18:29:31 +00003452 CPUReadMemoryFunc * const *mem_read,
3453 CPUWriteMemoryFunc * const *mem_write,
Avi Kivitybe675c92011-11-20 16:22:55 +02003454 void *opaque)
bellard33417e72003-08-10 21:47:01 +00003455{
Richard Henderson3cab7212010-05-07 09:52:51 -07003456 int i;
3457
bellard33417e72003-08-10 21:47:01 +00003458 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003459 io_index = get_free_io_mem_idx();
3460 if (io_index == -1)
3461 return io_index;
bellard33417e72003-08-10 21:47:01 +00003462 } else {
Avi Kivity1eed09c2009-06-14 11:38:51 +03003463 io_index >>= IO_MEM_SHIFT;
bellard33417e72003-08-10 21:47:01 +00003464 if (io_index >= IO_MEM_NB_ENTRIES)
3465 return -1;
3466 }
bellardb5ff1b32005-11-26 10:38:39 +00003467
Richard Henderson3cab7212010-05-07 09:52:51 -07003468 for (i = 0; i < 3; ++i) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003469 assert(mem_read[i]);
3470 _io_mem_read[io_index][i] = mem_read[i];
Richard Henderson3cab7212010-05-07 09:52:51 -07003471 }
3472 for (i = 0; i < 3; ++i) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003473 assert(mem_write[i]);
3474 _io_mem_write[io_index][i] = mem_write[i];
Richard Henderson3cab7212010-05-07 09:52:51 -07003475 }
bellarda4193c82004-06-03 14:01:43 +00003476 io_mem_opaque[io_index] = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003477
3478 return (io_index << IO_MEM_SHIFT);
bellard33417e72003-08-10 21:47:01 +00003479}
bellard61382a52003-10-27 21:22:23 +00003480
Blue Swirld60efc62009-08-25 18:29:31 +00003481int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3482 CPUWriteMemoryFunc * const *mem_write,
Avi Kivitybe675c92011-11-20 16:22:55 +02003483 void *opaque)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003484{
Avi Kivitybe675c92011-11-20 16:22:55 +02003485 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003486}
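/* A hedged registration sketch for the table-based API above; the
 * my_* accessors are hypothetical byte/word/long handlers. */
#if 0
static CPUReadMemoryFunc * const my_mem_read[3] = {
    my_readb, my_readw, my_readl,
};
static CPUWriteMemoryFunc * const my_mem_write[3] = {
    my_writeb, my_writew, my_writel,
};

static int example_register(void *opaque)
{
    /* The returned value can be fed to cpu_register_physical_memory(). */
    return cpu_register_io_memory(my_mem_read, my_mem_write, opaque);
}
#endif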
3487
aliguori88715652009-02-11 15:20:58 +00003488void cpu_unregister_io_memory(int io_table_address)
3489{
3490 int i;
3491 int io_index = io_table_address >> IO_MEM_SHIFT;
3492
3493 for (i=0;i < 3; i++) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003494 _io_mem_read[io_index][i] = NULL;
3495 _io_mem_write[io_index][i] = NULL;
aliguori88715652009-02-11 15:20:58 +00003496 }
3497 io_mem_opaque[io_index] = NULL;
3498 io_mem_used[io_index] = 0;
3499}
3500
Avi Kivitye9179ce2009-06-14 11:38:52 +03003501static void io_mem_init(void)
3502{
3503 int i;
3504
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003505 /* Must be first: */
3506 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3507 assert(io_mem_ram.ram_addr == 0);
3508 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3509 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3510 "unassigned", UINT64_MAX);
3511 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3512 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003513 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3514 "subpage-ram", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003515 for (i=0; i<5; i++)
3516 io_mem_used[i] = 1;
3517
3518 io_mem_watch = cpu_register_io_memory(watch_mem_read,
Avi Kivitybe675c92011-11-20 16:22:55 +02003519 watch_mem_write, NULL);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003520}
3521
Avi Kivity62152b82011-07-26 14:26:14 +03003522static void memory_map_init(void)
3523{
Anthony Liguori7267c092011-08-20 22:09:37 -05003524 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003525 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003526 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003527
Anthony Liguori7267c092011-08-20 22:09:37 -05003528 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003529 memory_region_init(system_io, "io", 65536);
3530 set_system_io_map(system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003531}
3532
3533MemoryRegion *get_system_memory(void)
3534{
3535 return system_memory;
3536}
3537
Avi Kivity309cb472011-08-08 16:09:03 +03003538MemoryRegion *get_system_io(void)
3539{
3540 return system_io;
3541}
3542
pbrooke2eef172008-06-08 01:09:01 +00003543#endif /* !defined(CONFIG_USER_ONLY) */
3544
bellard13eb76e2004-01-24 15:23:36 +00003545/* physical memory access (slow version, mainly for debug) */
3546#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003547int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3548 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003549{
3550 int l, flags;
3551 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003552 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003553
3554 while (len > 0) {
3555 page = addr & TARGET_PAGE_MASK;
3556 l = (page + TARGET_PAGE_SIZE) - addr;
3557 if (l > len)
3558 l = len;
3559 flags = page_get_flags(page);
3560 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003561 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003562 if (is_write) {
3563 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003564 return -1;
bellard579a97f2007-11-11 14:26:47 +00003565 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003566 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003567 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003568 memcpy(p, buf, l);
3569 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003570 } else {
3571 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003572 return -1;
bellard579a97f2007-11-11 14:26:47 +00003573 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003574 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003575 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003576 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003577 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003578 }
3579 len -= l;
3580 buf += l;
3581 addr += l;
3582 }
Paul Brooka68fe892010-03-01 00:08:59 +00003583 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003584}
bellard8df1cd02005-01-28 22:37:22 +00003585
bellard13eb76e2004-01-24 15:23:36 +00003586#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003587void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003588 int len, int is_write)
3589{
3590 int l, io_index;
3591 uint8_t *ptr;
3592 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003593 target_phys_addr_t page;
Anthony PERARD8ca56922011-07-15 04:32:53 +00003594 ram_addr_t pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003595 PhysPageDesc p;
ths3b46e622007-09-17 08:09:54 +00003596
bellard13eb76e2004-01-24 15:23:36 +00003597 while (len > 0) {
3598 page = addr & TARGET_PAGE_MASK;
3599 l = (page + TARGET_PAGE_SIZE) - addr;
3600 if (l > len)
3601 l = len;
bellard92e873b2004-05-21 14:52:29 +00003602 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003603 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003604
bellard13eb76e2004-01-24 15:23:36 +00003605 if (is_write) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003606 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003607 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003608 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003609 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
bellard6a00d602005-11-21 23:25:50 +00003610 /* XXX: could force cpu_single_env to NULL to avoid
3611 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003612 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003613 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003614 val = ldl_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003615 io_mem_write(io_index, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003616 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003617 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003618 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003619 val = lduw_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003620 io_mem_write(io_index, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003621 l = 2;
3622 } else {
bellard1c213d12005-09-03 10:49:04 +00003623 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003624 val = ldub_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003625 io_mem_write(io_index, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003626 l = 1;
3627 }
3628 } else {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003629 ram_addr_t addr1;
bellardb448f2f2004-02-25 23:24:04 +00003630 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003631 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003632 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003633 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003634 if (!cpu_physical_memory_is_dirty(addr1)) {
3635 /* invalidate code */
3636 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3637 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003638 cpu_physical_memory_set_dirty_flags(
3639 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003640 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003641 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003642 }
3643 } else {
Avi Kivity1d393fa2012-01-01 21:15:42 +02003644 if (!is_ram_rom_romd(pd)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003645 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003646 /* I/O case */
3647 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003648 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
aurel326c2934d2009-02-18 21:37:17 +00003649 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003650 /* 32 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003651 val = io_mem_read(io_index, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003652 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003653 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003654 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003655 /* 16 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003656 val = io_mem_read(io_index, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003657 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003658 l = 2;
3659 } else {
bellard1c213d12005-09-03 10:49:04 +00003660 /* 8 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003661 val = io_mem_read(io_index, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003662 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003663 l = 1;
3664 }
3665 } else {
3666 /* RAM case */
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003667 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3668 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3669 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003670 }
3671 }
3672 len -= l;
3673 buf += l;
3674 addr += l;
3675 }
3676}
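/* Usage sketch: the last argument selects the direction, so reading
 * guest-physical memory into a local buffer looks like this ("gpa" is
 * a hypothetical guest-physical address): */
#if 0
static void example_read_guest(target_phys_addr_t gpa)
{
    uint8_t buf[16];
    cpu_physical_memory_rw(gpa, buf, sizeof(buf), 0 /* is_write */);
}
#endif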
bellard8df1cd02005-01-28 22:37:22 +00003677
bellardd0ecd2a2006-04-23 17:14:48 +00003678/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003679void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003680 const uint8_t *buf, int len)
3681{
3682 int l;
3683 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003684 target_phys_addr_t page;
bellardd0ecd2a2006-04-23 17:14:48 +00003685 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003686 PhysPageDesc p;
ths3b46e622007-09-17 08:09:54 +00003687
bellardd0ecd2a2006-04-23 17:14:48 +00003688 while (len > 0) {
3689 page = addr & TARGET_PAGE_MASK;
3690 l = (page + TARGET_PAGE_SIZE) - addr;
3691 if (l > len)
3692 l = len;
3693 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003694 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003695
Avi Kivity1d393fa2012-01-01 21:15:42 +02003696 if (!is_ram_rom_romd(pd)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003697 /* do nothing */
3698 } else {
3699 unsigned long addr1;
3700 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3701 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003702 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003703 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003704 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003705 }
3706 len -= l;
3707 buf += l;
3708 addr += l;
3709 }
3710}
3711
aliguori6d16c2f2009-01-22 16:59:11 +00003712typedef struct {
3713 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003714 target_phys_addr_t addr;
3715 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003716} BounceBuffer;
3717
3718static BounceBuffer bounce;
3719
aliguoriba223c22009-01-22 16:59:16 +00003720typedef struct MapClient {
3721 void *opaque;
3722 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003723 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003724} MapClient;
3725
Blue Swirl72cf2d42009-09-12 07:36:22 +00003726static QLIST_HEAD(map_client_list, MapClient) map_client_list
3727 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003728
3729void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3730{
Anthony Liguori7267c092011-08-20 22:09:37 -05003731 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003732
3733 client->opaque = opaque;
3734 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003735 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003736 return client;
3737}
3738
3739void cpu_unregister_map_client(void *_client)
3740{
3741 MapClient *client = (MapClient *)_client;
3742
Blue Swirl72cf2d42009-09-12 07:36:22 +00003743 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003744 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003745}
3746
3747static void cpu_notify_map_clients(void)
3748{
3749 MapClient *client;
3750
Blue Swirl72cf2d42009-09-12 07:36:22 +00003751 while (!QLIST_EMPTY(&map_client_list)) {
3752 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003753 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003754 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003755 }
3756}
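/* Usage sketch for the map-client list: a caller whose
 * cpu_physical_memory_map() attempt failed can ask to be called back
 * once the bounce buffer frees up. "retry_dma" is hypothetical. */
#if 0
static void retry_dma(void *opaque)
{
    /* retry cpu_physical_memory_map() here */
}

static void example_wait_for_map(void *opaque)
{
    void *handle = cpu_register_map_client(opaque, retry_dma);
    /* keep handle for cpu_unregister_map_client() if the wait is
       abandoned; the notify path unregisters automatically */
}
#endif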
3757
aliguori6d16c2f2009-01-22 16:59:11 +00003758/* Map a physical memory region into a host virtual address.
3759 * May map a subset of the requested range: the requested length is passed
3760 * in *plen and the length actually mapped is returned in *plen.
3761 * May return NULL if resources needed to perform the mapping are exhausted.
3762 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003762 * Use cpu_register_map_client() to know when retrying the map operation is
3763 * likely to succeed (see the usage sketch after cpu_physical_memory_unmap()).
aliguori6d16c2f2009-01-22 16:59:11 +00003764 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003765void *cpu_physical_memory_map(target_phys_addr_t addr,
3766 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003767 int is_write)
3768{
Anthony Liguoric227f092009-10-01 16:12:16 -05003769 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003770 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003771 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003772 target_phys_addr_t page;
aliguori6d16c2f2009-01-22 16:59:11 +00003773 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003774 PhysPageDesc p;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003775 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003776 ram_addr_t rlen;
3777 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003778
3779 while (len > 0) {
3780 page = addr & TARGET_PAGE_MASK;
3781 l = (page + TARGET_PAGE_SIZE) - addr;
3782 if (l > len)
3783 l = len;
3784 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003785 pd = p.phys_offset;
aliguori6d16c2f2009-01-22 16:59:11 +00003786
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003787 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003788 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003789 break;
3790 }
3791 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3792 bounce.addr = addr;
3793 bounce.len = l;
3794 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02003795 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003796 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003797
3798 *plen = l;
3799 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00003800 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003801 if (!todo) {
3802 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3803 }
aliguori6d16c2f2009-01-22 16:59:11 +00003804
3805 len -= l;
3806 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003807 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00003808 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003809 rlen = todo;
3810 ret = qemu_ram_ptr_length(raddr, &rlen);
3811 *plen = rlen;
3812 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003813}
3814
3815/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3816 * Will also mark the memory as dirty if is_write == 1. access_len gives
3817 * the amount of memory that was actually read or written by the caller
3818 * (see the usage sketch after this function). */
Anthony Liguoric227f092009-10-01 16:12:16 -05003819void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3820 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003821{
3822 if (buffer != bounce.buffer) {
3823 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003824 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003825 while (access_len) {
3826 unsigned l;
3827 l = TARGET_PAGE_SIZE;
3828 if (l > access_len)
3829 l = access_len;
3830 if (!cpu_physical_memory_is_dirty(addr1)) {
3831 /* invalidate code */
3832 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3833 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003834 cpu_physical_memory_set_dirty_flags(
3835 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori6d16c2f2009-01-22 16:59:11 +00003836 }
3837 addr1 += l;
3838 access_len -= l;
3839 }
3840 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003841 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003842 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003843 }
aliguori6d16c2f2009-01-22 16:59:11 +00003844 return;
3845 }
3846 if (is_write) {
3847 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3848 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003849 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003850 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00003851 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003852}
bellardd0ecd2a2006-04-23 17:14:48 +00003853
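/* Usage sketch (compiled out): a typical device DMA write into guest
 * memory. cpu_physical_memory_map() may shorten *plen, for instance when
 * it falls back to the single-page bounce buffer, so the caller loops
 * until the range is covered and reports the bytes actually written via
 * the access_len argument on unmap. The function name is hypothetical. */
#if 0
static void example_dma_fill(target_phys_addr_t addr, target_phys_addr_t len,
                             uint8_t pattern)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *p = cpu_physical_memory_map(addr, &plen, 1);

        if (p == NULL) {
            break;      /* resources exhausted; defer via a map client */
        }
        memset(p, pattern, plen);
        /* is_write == 1: marks the pages dirty and invalidates any
           translated code that overlaps them. */
        cpu_physical_memory_unmap(p, plen, 1, plen);
        addr += plen;
        len -= plen;
    }
}
#endif
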
bellard8df1cd02005-01-28 22:37:22 +00003854/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003855static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3856 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003857{
3858 int io_index;
3859 uint8_t *ptr;
3860 uint32_t val;
3861 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003862 PhysPageDesc p;
bellard8df1cd02005-01-28 22:37:22 +00003863
3864 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003865 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003866
Avi Kivity1d393fa2012-01-01 21:15:42 +02003867 if (!is_ram_rom_romd(pd)) {
bellard8df1cd02005-01-28 22:37:22 +00003868 /* I/O case */
3869 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003870 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Avi Kivityacbbec52011-11-21 12:27:03 +02003871 val = io_mem_read(io_index, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003872#if defined(TARGET_WORDS_BIGENDIAN)
3873 if (endian == DEVICE_LITTLE_ENDIAN) {
3874 val = bswap32(val);
3875 }
3876#else
3877 if (endian == DEVICE_BIG_ENDIAN) {
3878 val = bswap32(val);
3879 }
3880#endif
bellard8df1cd02005-01-28 22:37:22 +00003881 } else {
3882 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003883 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard8df1cd02005-01-28 22:37:22 +00003884 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003885 switch (endian) {
3886 case DEVICE_LITTLE_ENDIAN:
3887 val = ldl_le_p(ptr);
3888 break;
3889 case DEVICE_BIG_ENDIAN:
3890 val = ldl_be_p(ptr);
3891 break;
3892 default:
3893 val = ldl_p(ptr);
3894 break;
3895 }
bellard8df1cd02005-01-28 22:37:22 +00003896 }
3897 return val;
3898}
3899
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003900uint32_t ldl_phys(target_phys_addr_t addr)
3901{
3902 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3903}
3904
3905uint32_t ldl_le_phys(target_phys_addr_t addr)
3906{
3907 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3908}
3909
3910uint32_t ldl_be_phys(target_phys_addr_t addr)
3911{
3912 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3913}
3914
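/* Usage sketch (compiled out): the _le/_be accessors let device models
 * read guest-memory structures with a fixed byte order instead of wrapping
 * ldl_phys() in #ifdef TARGET_WORDS_BIGENDIAN. The descriptor layout here
 * is hypothetical. */
#if 0
static uint32_t example_read_desc_flags(target_phys_addr_t desc)
{
    /* The (made-up) descriptor word at offset 4 is little-endian on the
       bus, whatever the target CPU's native byte order is. */
    return ldl_le_phys(desc + 4);
}
#endif
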
bellard84b7b8e2005-11-28 21:19:04 +00003915/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003916static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3917 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003918{
3919 int io_index;
3920 uint8_t *ptr;
3921 uint64_t val;
3922 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003923 PhysPageDesc p;
bellard84b7b8e2005-11-28 21:19:04 +00003924
3925 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003926 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003927
Avi Kivity1d393fa2012-01-01 21:15:42 +02003928 if (!is_ram_rom_romd(pd)) {
bellard84b7b8e2005-11-28 21:19:04 +00003929 /* I/O case */
3930 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003931 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003932
3933 /* XXX This is broken when device endian != cpu endian.
3934 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00003935#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivityacbbec52011-11-21 12:27:03 +02003936 val = io_mem_read(io_index, addr, 4) << 32;
3937 val |= io_mem_read(io_index, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00003938#else
Avi Kivityacbbec52011-11-21 12:27:03 +02003939 val = io_mem_read(io_index, addr, 4);
3940 val |= io_mem_read(io_index, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00003941#endif
3942 } else {
3943 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003944 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard84b7b8e2005-11-28 21:19:04 +00003945 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003946 switch (endian) {
3947 case DEVICE_LITTLE_ENDIAN:
3948 val = ldq_le_p(ptr);
3949 break;
3950 case DEVICE_BIG_ENDIAN:
3951 val = ldq_be_p(ptr);
3952 break;
3953 default:
3954 val = ldq_p(ptr);
3955 break;
3956 }
bellard84b7b8e2005-11-28 21:19:04 +00003957 }
3958 return val;
3959}
3960
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003961uint64_t ldq_phys(target_phys_addr_t addr)
3962{
3963 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3964}
3965
3966uint64_t ldq_le_phys(target_phys_addr_t addr)
3967{
3968 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3969}
3970
3971uint64_t ldq_be_phys(target_phys_addr_t addr)
3972{
3973 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3974}
3975
bellardaab33092005-10-30 20:48:42 +00003976/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003977uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003978{
3979 uint8_t val;
3980 cpu_physical_memory_read(addr, &val, 1);
3981 return val;
3982}
3983
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003984/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003985static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3986 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003987{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003988 int io_index;
3989 uint8_t *ptr;
3990 uint32_t val;
3991 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003992 PhysPageDesc p;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003993
3994 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003995 pd = p.phys_offset;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003996
Avi Kivity1d393fa2012-01-01 21:15:42 +02003997 if (!is_ram_rom_romd(pd)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003998 /* I/O case */
3999 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004000 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Avi Kivityacbbec52011-11-21 12:27:03 +02004001 val = io_mem_read(io_index, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004002#if defined(TARGET_WORDS_BIGENDIAN)
4003 if (endian == DEVICE_LITTLE_ENDIAN) {
4004 val = bswap16(val);
4005 }
4006#else
4007 if (endian == DEVICE_BIG_ENDIAN) {
4008 val = bswap16(val);
4009 }
4010#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004011 } else {
4012 /* RAM case */
4013 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4014 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004015 switch (endian) {
4016 case DEVICE_LITTLE_ENDIAN:
4017 val = lduw_le_p(ptr);
4018 break;
4019 case DEVICE_BIG_ENDIAN:
4020 val = lduw_be_p(ptr);
4021 break;
4022 default:
4023 val = lduw_p(ptr);
4024 break;
4025 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004026 }
4027 return val;
bellardaab33092005-10-30 20:48:42 +00004028}
4029
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004030uint32_t lduw_phys(target_phys_addr_t addr)
4031{
4032 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4033}
4034
4035uint32_t lduw_le_phys(target_phys_addr_t addr)
4036{
4037 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4038}
4039
4040uint32_t lduw_be_phys(target_phys_addr_t addr)
4041{
4042 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4043}
4044
bellard8df1cd02005-01-28 22:37:22 +00004045/* warning: addr must be aligned. The ram page is not marked as dirty
4046 and the code inside is not invalidated. This is useful if the dirty
4047 bits are used to track modified PTEs (see the sketch after stq_phys_notdirty) */
Anthony Liguoric227f092009-10-01 16:12:16 -05004048void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00004049{
4050 int io_index;
4051 uint8_t *ptr;
4052 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004053 PhysPageDesc p;
bellard8df1cd02005-01-28 22:37:22 +00004054
4055 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004056 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004057
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004058 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
bellard8df1cd02005-01-28 22:37:22 +00004059 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004060 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Avi Kivityacbbec52011-11-21 12:27:03 +02004061 io_mem_write(io_index, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004062 } else {
aliguori74576192008-10-06 14:02:03 +00004063 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00004064 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00004065 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00004066
4067 if (unlikely(in_migration)) {
4068 if (!cpu_physical_memory_is_dirty(addr1)) {
4069 /* invalidate code */
4070 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4071 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004072 cpu_physical_memory_set_dirty_flags(
4073 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00004074 }
4075 }
bellard8df1cd02005-01-28 22:37:22 +00004076 }
4077}
4078
Anthony Liguoric227f092009-10-01 16:12:16 -05004079void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00004080{
4081 int io_index;
4082 uint8_t *ptr;
4083 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004084 PhysPageDesc p;
j_mayerbc98a7e2007-04-04 07:55:12 +00004085
4086 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004087 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004088
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004089 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
j_mayerbc98a7e2007-04-04 07:55:12 +00004090 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004091 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
j_mayerbc98a7e2007-04-04 07:55:12 +00004092#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivityacbbec52011-11-21 12:27:03 +02004093 io_mem_write(io_index, addr, val >> 32, 4);
4094 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004095#else
Avi Kivityacbbec52011-11-21 12:27:03 +02004096 io_mem_write(io_index, addr, (uint32_t)val, 4);
4097 io_mem_write(io_index, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004098#endif
4099 } else {
pbrook5579c7f2009-04-11 14:47:08 +00004100 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
j_mayerbc98a7e2007-04-04 07:55:12 +00004101 (addr & ~TARGET_PAGE_MASK);
4102 stq_p(ptr, val);
4103 }
4104}
4105
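/* Usage sketch (compiled out): an MMU helper setting the accessed bit of a
 * 32-bit page table entry during a hardware page-table walk.
 * stl_phys_notdirty() avoids flagging the page dirty and retranslating any
 * code on it for this bookkeeping store. The function name and the bit
 * position are hypothetical. */
#if 0
static void example_pte_set_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & 0x20)) {              /* made-up "accessed" bit */
        stl_phys_notdirty(pte_addr, pte | 0x20);
    }
}
#endif
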
bellard8df1cd02005-01-28 22:37:22 +00004106/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004107static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4108 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004109{
4110 int io_index;
4111 uint8_t *ptr;
4112 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004113 PhysPageDesc p;
bellard8df1cd02005-01-28 22:37:22 +00004114
4115 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004116 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004117
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004118 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
bellard8df1cd02005-01-28 22:37:22 +00004119 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004120 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004121#if defined(TARGET_WORDS_BIGENDIAN)
4122 if (endian == DEVICE_LITTLE_ENDIAN) {
4123 val = bswap32(val);
4124 }
4125#else
4126 if (endian == DEVICE_BIG_ENDIAN) {
4127 val = bswap32(val);
4128 }
4129#endif
Avi Kivityacbbec52011-11-21 12:27:03 +02004130 io_mem_write(io_index, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004131 } else {
4132 unsigned long addr1;
4133 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4134 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004135 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004136 switch (endian) {
4137 case DEVICE_LITTLE_ENDIAN:
4138 stl_le_p(ptr, val);
4139 break;
4140 case DEVICE_BIG_ENDIAN:
4141 stl_be_p(ptr, val);
4142 break;
4143 default:
4144 stl_p(ptr, val);
4145 break;
4146 }
bellard3a7d9292005-08-21 09:26:42 +00004147 if (!cpu_physical_memory_is_dirty(addr1)) {
4148 /* invalidate code */
4149 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4150 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004151 cpu_physical_memory_set_dirty_flags(addr1,
4152 (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00004153 }
bellard8df1cd02005-01-28 22:37:22 +00004154 }
4155}
4156
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004157void stl_phys(target_phys_addr_t addr, uint32_t val)
4158{
4159 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4160}
4161
4162void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4163{
4164 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4165}
4166
4167void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4168{
4169 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4170}
4171
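/* Usage sketch (compiled out): the store counterpart of the load example
 * above, publishing a little-endian status word for a guest driver. Unlike
 * stl_phys_notdirty(), stl_le_phys() marks the page dirty and invalidates
 * overlapping translated code. The offset is hypothetical. */
#if 0
static void example_write_desc_status(target_phys_addr_t desc, uint32_t st)
{
    stl_le_phys(desc + 8, st);
}
#endif
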
bellardaab33092005-10-30 20:48:42 +00004172/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004173void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004174{
4175 uint8_t v = val;
4176 cpu_physical_memory_write(addr, &v, 1);
4177}
4178
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004179/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004180static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4181 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004182{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004183 int io_index;
4184 uint8_t *ptr;
4185 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004186 PhysPageDesc p;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004187
4188 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004189 pd = p.phys_offset;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004190
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004191 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004192 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004193 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004194#if defined(TARGET_WORDS_BIGENDIAN)
4195 if (endian == DEVICE_LITTLE_ENDIAN) {
4196 val = bswap16(val);
4197 }
4198#else
4199 if (endian == DEVICE_BIG_ENDIAN) {
4200 val = bswap16(val);
4201 }
4202#endif
Avi Kivityacbbec52011-11-21 12:27:03 +02004203 io_mem_write(io_index, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004204 } else {
4205 unsigned long addr1;
4206 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4207 /* RAM case */
4208 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004209 switch (endian) {
4210 case DEVICE_LITTLE_ENDIAN:
4211 stw_le_p(ptr, val);
4212 break;
4213 case DEVICE_BIG_ENDIAN:
4214 stw_be_p(ptr, val);
4215 break;
4216 default:
4217 stw_p(ptr, val);
4218 break;
4219 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004220 if (!cpu_physical_memory_is_dirty(addr1)) {
4221 /* invalidate code */
4222 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4223 /* set dirty bit */
4224 cpu_physical_memory_set_dirty_flags(addr1,
4225 (0xff & ~CODE_DIRTY_FLAG));
4226 }
4227 }
bellardaab33092005-10-30 20:48:42 +00004228}
4229
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004230void stw_phys(target_phys_addr_t addr, uint32_t val)
4231{
4232 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4233}
4234
4235void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4236{
4237 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4238}
4239
4240void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4241{
4242 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4243}
4244
bellardaab33092005-10-30 20:48:42 +00004245/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004246void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004247{
4248 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004249 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004250}
4251
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004252void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4253{
4254 val = cpu_to_le64(val);
4255 cpu_physical_memory_write(addr, &val, 8);
4256}
4257
4258void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4259{
4260 val = cpu_to_be64(val);
4261 cpu_physical_memory_write(addr, &val, 8);
4262}
4263
aliguori5e2972f2009-03-28 17:51:36 +00004264/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00004265int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004266 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004267{
4268 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004269 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004270 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004271
4272 while (len > 0) {
4273 page = addr & TARGET_PAGE_MASK;
4274 phys_addr = cpu_get_phys_page_debug(env, page);
4275 /* if no physical page mapped, return an error */
4276 if (phys_addr == -1)
4277 return -1;
4278 l = (page + TARGET_PAGE_SIZE) - addr;
4279 if (l > len)
4280 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004281 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004282 if (is_write)
4283 cpu_physical_memory_write_rom(phys_addr, buf, l);
4284 else
aliguori5e2972f2009-03-28 17:51:36 +00004285 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004286 len -= l;
4287 buf += l;
4288 addr += l;
4289 }
4290 return 0;
4291}
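
/* Usage sketch (compiled out): this is the path the gdbstub takes to
 * access guest virtual memory; writes go through
 * cpu_physical_memory_write_rom() so a debugger can plant breakpoints even
 * in ROM. The wrapper name is hypothetical. */
#if 0
static int example_debug_peek(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);   /* 0 == read */
}
#endif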
Paul Brooka68fe892010-03-01 00:08:59 +00004292#endif
bellard13eb76e2004-01-24 15:23:36 +00004293
pbrook2e70f6e2008-06-29 01:03:05 +00004294/* in deterministic (icount) execution mode, instructions that perform
4295 device I/O must be at the end of the TB */
4296void cpu_io_recompile(CPUState *env, void *retaddr)
4297{
4298 TranslationBlock *tb;
4299 uint32_t n, cflags;
4300 target_ulong pc, cs_base;
4301 uint64_t flags;
4302
4303 tb = tb_find_pc((unsigned long)retaddr);
4304 if (!tb) {
4305 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4306 retaddr);
4307 }
4308 n = env->icount_decr.u16.low + tb->icount;
Stefan Weil618ba8e2011-04-18 06:39:53 +00004309 cpu_restore_state(tb, env, (unsigned long)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004310 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004311 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004312 n = n - env->icount_decr.u16.low;
4313 /* Generate a new TB ending on the I/O insn. */
4314 n++;
4315 /* On MIPS and SH, delay slot instructions can only be restarted if
4316 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004317 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004318 branch. */
4319#if defined(TARGET_MIPS)
4320 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4321 env->active_tc.PC -= 4;
4322 env->icount_decr.u16.low++;
4323 env->hflags &= ~MIPS_HFLAG_BMASK;
4324 }
4325#elif defined(TARGET_SH4)
4326 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4327 && n > 1) {
4328 env->pc -= 2;
4329 env->icount_decr.u16.low++;
4330 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4331 }
4332#endif
4333 /* This should never happen. */
4334 if (n > CF_COUNT_MASK)
4335 cpu_abort(env, "TB too big during recompile");
4336
4337 cflags = n | CF_LAST_IO;
4338 pc = tb->pc;
4339 cs_base = tb->cs_base;
4340 flags = tb->flags;
4341 tb_phys_invalidate(tb, -1);
4342 /* FIXME: In theory this could raise an exception. In practice
4343 we have already translated the block once so it's probably ok. */
4344 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004345 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004346 the first in the TB) then we end up generating a whole new TB and
4347 repeating the fault, which is horribly inefficient.
4348 Better would be to execute just this insn uncached, or generate a
4349 second new TB. */
4350 cpu_resume_from_signal(env, NULL);
4351}
4352
Paul Brookb3755a92010-03-12 16:54:58 +00004353#if !defined(CONFIG_USER_ONLY)
4354
Stefan Weil055403b2010-10-22 23:03:32 +02004355void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004356{
4357 int i, target_code_size, max_target_code_size;
4358 int direct_jmp_count, direct_jmp2_count, cross_page;
4359 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004360
bellarde3db7222005-01-26 22:00:47 +00004361 target_code_size = 0;
4362 max_target_code_size = 0;
4363 cross_page = 0;
4364 direct_jmp_count = 0;
4365 direct_jmp2_count = 0;
4366 for(i = 0; i < nb_tbs; i++) {
4367 tb = &tbs[i];
4368 target_code_size += tb->size;
4369 if (tb->size > max_target_code_size)
4370 max_target_code_size = tb->size;
4371 if (tb->page_addr[1] != -1)
4372 cross_page++;
4373 if (tb->tb_next_offset[0] != 0xffff) {
4374 direct_jmp_count++;
4375 if (tb->tb_next_offset[1] != 0xffff) {
4376 direct_jmp2_count++;
4377 }
4378 }
4379 }
4380 /* XXX: avoid using doubles ? */
bellard57fec1f2008-02-01 10:50:11 +00004381 cpu_fprintf(f, "Translation buffer state:\n");
Stefan Weil055403b2010-10-22 23:03:32 +02004382 cpu_fprintf(f, "gen code size %td/%ld\n",
bellard26a5f132008-05-28 12:30:31 +00004383 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4384 cpu_fprintf(f, "TB count %d/%d\n",
4385 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004386 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004387 nb_tbs ? target_code_size / nb_tbs : 0,
4388 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004389 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004390 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4391 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004392 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4393 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004394 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4395 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004396 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004397 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4398 direct_jmp2_count,
4399 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004400 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004401 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4402 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4403 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004404 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004405}
4406
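/* Usage sketch (compiled out): the monitor's "info jit" command is the
 * usual caller; any stdio stream plus an fprintf-compatible formatter will
 * do. */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif
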
Avi Kivityd39e8222012-01-01 23:35:10 +02004407/* NOTE: this function can trigger an exception */
4408/* NOTE2: the returned address is not exactly the physical address: it
4409 is a ram_addr_t offset into the RAM block space (historically, the
4410 offset relative to phys_ram_base), not a guest physical address */
4410tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4411{
4412 int mmu_idx, page_index, pd;
4413 void *p;
4414
4415 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4416 mmu_idx = cpu_mmu_index(env1);
4417 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4418 (addr & TARGET_PAGE_MASK))) {
4419 ldub_code(addr);
4420 }
4421 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004422 if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
4423 && !(pd & IO_MEM_ROMD)) {
Avi Kivityd39e8222012-01-01 23:35:10 +02004424#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4425 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4426#else
4427 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4428#endif
4429 }
4430 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4431 return qemu_ram_addr_from_host_nofail(p);
4432}
4433
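/* Each inclusion of softmmu_template.h below instantiates the slow-path
   code-fetch load helpers for one access size: SHIFT is log2 of the access
   width in bytes, so SHIFT 0..3 yields the 1/2/4/8-byte _cmmu variants.
   GETPC() is stubbed out to NULL because these helpers are reached from
   the translator rather than from generated code, so there is no host
   return address to restore CPU state from. */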
bellard61382a52003-10-27 21:22:23 +00004434#define MMUSUFFIX _cmmu
Blue Swirl39171492011-09-21 18:13:16 +00004435#undef GETPC
bellard61382a52003-10-27 21:22:23 +00004436#define GETPC() NULL
4437#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00004438#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00004439
4440#define SHIFT 0
4441#include "softmmu_template.h"
4442
4443#define SHIFT 1
4444#include "softmmu_template.h"
4445
4446#define SHIFT 2
4447#include "softmmu_template.h"
4448
4449#define SHIFT 3
4450#include "softmmu_template.h"
4451
4452#undef env
4453
4454#endif