/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
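
/* Worked example: with a 32-bit guest virtual address space and 12-bit
   pages, 20 index bits remain.  V_L1_BITS_REM = 20 % 10 = 0, which is
   below the threshold of 4, so the L1 table absorbs a whole level
   (V_L1_BITS = 10) and a single 10-bit bottom level of PageDesc entries
   covers the rest (V_L1_SHIFT = 32 - 12 - 10 = 10). */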

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

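/* map_exec() marks a host memory range as executable.  The POSIX variant
   rounds the range out to host page boundaries before calling mprotect();
   VirtualProtect() already operates at page granularity. */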
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

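    /* On BSD user-mode hosts, walk the host's own memory map and mark
       every existing mapping PAGE_RESERVED, so that guest mmap()
       requests are not placed on top of pages the host process already
       uses. */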
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;
        int first_index = index & ~(L2_SIZE - 1);

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = io_mem_unassigned.ram_addr;
            pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

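/* Unlike page_find(), this returns the descriptor by value; a missing
   page yields a default descriptor pointing at the unassigned I/O slot
   rather than NULL. */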
static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
{
    PhysPageDesc *p = phys_page_find_alloc(index, 0);

    if (p) {
        return *p;
    } else {
        return (PhysPageDesc) {
            .phys_offset = io_mem_unassigned.ram_addr,
            .region_offset = index << TARGET_PAGE_BITS,
        };
    }
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
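    /* Leave room for one maximally-sized translation block at the end of
       the buffer, so a TB that starts below code_gen_buffer_max_size
       cannot overrun the buffer while it is being generated. */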
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

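/* return the CPUState with the given index, or NULL if it does not exist */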
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

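/* free the SMC bitmap of a page and reset its write counter */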
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

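/* unlink 'tb' from a per-page TB list.  The low two bits of each list
   pointer encode which of the two page slots of the pointed-to TB
   continues the chain. */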
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

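/* remove 'tb' from the circular list of TBs jumping to the same
   destination.  A value of 2 in the low bits marks the list head
   (the destination TB itself). */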
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

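/* unlink 'tb' from the physical hash table, the per-page lists, the
   per-CPU tb_jmp_cache and the jump chains, so that no cached pointer
   can reach it any more */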
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

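/* set bits [start, start + len) in the bit array 'tab' */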
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

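/* build a bitmap with one bit per byte of the page, set for every byte
   covered by translated code, so that small writes can be checked
   against it cheaply */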
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

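/* Generate a new TB for the code starting at pc/cs_base/flags.  If the
   code buffer is full, everything is flushed first, so the second
   tb_alloc() below cannot fail. */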
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
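/* user-mode variant: invalidate all TBs on the page containing 'addr';
   'pc' and 'puc' describe the faulting write so that, with precise SMC,
   execution can be restarted from the signal handler. */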
Paul Brook41c1b1c2010-03-12 16:54:58 +00001167static void tb_invalidate_phys_page(tb_page_addr_t addr,
bellardd720b932004-04-25 17:57:43 +00001168 unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001169{
aliguori6b917542008-11-18 19:46:41 +00001170 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001171 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001172 int n;
bellardd720b932004-04-25 17:57:43 +00001173#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001174 TranslationBlock *current_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001175 CPUState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001176 int current_tb_modified = 0;
1177 target_ulong current_pc = 0;
1178 target_ulong current_cs_base = 0;
1179 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001180#endif
bellard9fa3e852004-01-04 18:06:42 +00001181
1182 addr &= TARGET_PAGE_MASK;
1183 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001184 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001185 return;
1186 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001187#ifdef TARGET_HAS_PRECISE_SMC
1188 if (tb && pc != 0) {
1189 current_tb = tb_find_pc(pc);
1190 }
1191#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001192 while (tb != NULL) {
bellard9fa3e852004-01-04 18:06:42 +00001193 n = (long)tb & 3;
1194 tb = (TranslationBlock *)((long)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001195#ifdef TARGET_HAS_PRECISE_SMC
1196 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001197 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001198 /* If we are modifying the current TB, we must stop
1199 its execution. We could be more precise by checking
1200 that the modification is after the current PC, but it
1201 would require a specialized function to partially
1202 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001203
bellardd720b932004-04-25 17:57:43 +00001204 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001205 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001206 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1207 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001208 }
1209#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001210 tb_phys_invalidate(tb, addr);
1211 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001212 }
1213 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001214#ifdef TARGET_HAS_PRECISE_SMC
1215 if (current_tb_modified) {
1216 /* we generate a block containing just the instruction
1217 modifying the memory. It will ensure that it cannot modify
1218 itself */
bellardea1c1802004-06-14 18:56:36 +00001219 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001220 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001221 cpu_resume_from_signal(env, puc);
1222 }
1223#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001224}
bellard9fa3e852004-01-04 18:06:42 +00001225#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001226
1227/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001228static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001229 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001230{
1231 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001232#ifndef CONFIG_USER_ONLY
1233 bool page_already_protected;
1234#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001235
bellard9fa3e852004-01-04 18:06:42 +00001236 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001237 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001238 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001239#ifndef CONFIG_USER_ONLY
1240 page_already_protected = p->first_tb != NULL;
1241#endif
bellard9fa3e852004-01-04 18:06:42 +00001242 p->first_tb = (TranslationBlock *)((long)tb | n);
1243 invalidate_page_bitmap(p);
1244
bellard107db442004-06-22 18:48:46 +00001245#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001246
bellard9fa3e852004-01-04 18:06:42 +00001247#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001248 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001249 target_ulong addr;
1250 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001251 int prot;
1252
bellardfd6ce8f2003-05-14 19:00:11 +00001253 /* force the host page as non writable (writes will have a
1254 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001255 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001256 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001257 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1258 addr += TARGET_PAGE_SIZE) {
1259
1260 p2 = page_find (addr >> TARGET_PAGE_BITS);
1261 if (!p2)
1262 continue;
1263 prot |= p2->flags;
1264 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001265 }
ths5fafdf22007-09-16 21:08:06 +00001266 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001267 (prot & PAGE_BITS) & ~PAGE_WRITE);
1268#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001269 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001270 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001271#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001272 }
bellard9fa3e852004-01-04 18:06:42 +00001273#else
1274 /* if some code is already present, then the pages are already
1275 protected. So we handle the case where only the first TB is
1276 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001277 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001278 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001279 }
1280#endif
bellardd720b932004-04-25 17:57:43 +00001281
1282#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001283}
1284
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB containing the host PC 'tc_ptr': the TB such that
   tb->tc_ptr <= tc_ptr < (tb + 1)->tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

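/* Unlink 'tb' from the circular list of blocks chained to its
   successor tb->jmp_next[n], reset the direct jump in tb's generated
   code, then recursively do the same for the successor so that no
   stale chain survives. */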
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

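/* Invalidate any translated code containing the breakpoint address,
   so that the next execution of that code is retranslated and hits
   the breakpoint. */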
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

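/* A minimal usage sketch (hypothetical caller, not part of this file):
 * set a 4-byte read/write watchpoint, then drop it by reference:
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_ACCESS, &wp) == 0) {
 *         ... run the guest ...
 *         cpu_watchpoint_remove_by_ref(env, wp);
 *     }
 */
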
/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the TBs it may have chained to */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

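/* Clear the given bits in the CPU's pending interrupt mask. */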
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

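/* Request that the CPU leave its execution loop as soon as possible. */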
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

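/* Return 1 if the NUL-terminated string 's2' equals the first 'n'
   characters of 's1', 0 otherwise. */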
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

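/* A minimal sketch of how the parser above is used (hypothetical
 * values): a comma separated list such as the argument of the -d
 * option is turned into a mask and installed with cpu_set_log():
 *
 *     int mask = cpu_str_to_log_mask("in_asm,int");
 *     if (!mask) {
 *         fprintf(stderr, "unknown log item\n");
 *     } else {
 *         cpu_set_log(mask);
 *     }
 */
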
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

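/* Create a copy of 'env': duplicate the whole CPUState, restore the
   fields that must stay per-instance, and clone all break- and
   watchpoints onto the new copy (used, e.g., when user-mode emulation
   forks a guest process). */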
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might overlap the
       flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

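/* Invalidate a single TLB entry if it maps 'addr' for reading,
   writing or code fetching. */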
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

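/* Mark a writable RAM TLB entry as TLB_NOTDIRTY if its target falls
   inside [start, start + length), so that the next guest write takes
   the slow path and sets the page's dirty bits again. */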
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

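/* Re-apply TLB_NOTDIRTY to a writable RAM TLB entry whose backing
   page has had its dirty bits cleared, so that future writes are
   trapped again. */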
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

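/* A worked example for the widening loop above (hypothetical values):
 * with an existing 2 MB region at 0x00200000 (mask 0xffe00000), adding
 * another 2 MB page at 0x00800000 shifts the mask left until both
 * addresses agree on the remaining bits, giving mask 0xff000000 and
 * base 0x00000000, i.e. one 16 MB region covering both large pages. */

/* Predicates on the io_mem index stored in the low bits of a
   phys_offset: plain RAM/ROM, a readable ROM device ("romd"), or
   either of the two. */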
static bool is_ram_rom(ram_addr_t pd)
{
    pd &= ~TARGET_PAGE_MASK;
    return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
}

static bool is_romd(ram_addr_t pd)
{
    MemoryRegion *mr;

    pd &= ~TARGET_PAGE_MASK;
    mr = io_mem_region[pd];
    return mr->rom_device && mr->readable;
}

static bool is_ram_rom_romd(ram_addr_t pd)
{
    return is_ram_rom(pd) || is_romd(pd);
}

/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if (!is_ram_rom_romd(pd)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if (is_ram_rom(pd)) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
            iotlb |= io_mem_notdirty.ram_addr;
        else
            iotlb |= io_mem_rom.ram_addr;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        iotlb += p.region_offset;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch.ram_addr + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr || is_romd(pd)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

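/* Callback for page_dump(): print one contiguous mapping with its
   protection bits. */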
static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

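/* Return the PAGE_* flags of the guest page containing 'address',
   or 0 if the page is not mapped. */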
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

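/* A short reading of the macro above (descriptive; the numbers are a
 * hypothetical example assuming 4 KB target pages): for the page that
 * contains 'addr', it computes the byte range [start_addr2, end_addr2]
 * that the registration actually touches inside that page, and sets
 * need_subpage when that range is smaller than the full page.  E.g.
 * registering 0x1800 bytes starting at 0x0800 yields, for the first
 * page, start_addr2 = 0x800, end_addr2 = 0xfff and need_subpage = 1. */
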
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_log(MemoryRegionSection *section,
                                      bool readable, bool readonly)
{
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    ram_addr_t phys_offset = section->mr->ram_addr;
    ram_addr_t region_offset = section->offset_within_region;
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    if (memory_region_is_ram(section->mr)) {
        phys_offset += region_offset;
        region_offset = 0;
    }

    if (readonly) {
        phys_offset |= io_mem_rom.ram_addr;
    }

    assert(size);

    if (phys_offset == io_mem_unassigned.ram_addr) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
        if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;
            MemoryRegion *mr = io_mem_region[orig_memory & ~TARGET_PAGE_MASK];

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(mr->subpage)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = container_of(mr, subpage_t, iomem);
                }
pbrook8da3ff12008-12-01 18:59:50 +00002581 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2582 region_offset);
2583 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002584 } else {
2585 p->phys_offset = phys_offset;
Avi Kivity2774c6d2012-01-01 18:24:24 +02002586 p->region_offset = region_offset;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002587 if (is_ram_rom_romd(phys_offset))
blueswir1db7b5422007-05-26 17:36:03 +00002588 phys_offset += TARGET_PAGE_SIZE;
2589 }
2590 } else {
2591 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2592 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002593 p->region_offset = region_offset;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002594 if (is_ram_rom_romd(phys_offset)) {
blueswir1db7b5422007-05-26 17:36:03 +00002595 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002596 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002597 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002598 int need_subpage = 0;
2599
2600 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2601 end_addr2, need_subpage);
2602
Richard Hendersonf6405242010-04-22 16:47:31 -07002603 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002604 subpage = subpage_init((addr & TARGET_PAGE_MASK),
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002605 &p->phys_offset,
2606 io_mem_unassigned.ram_addr,
pbrook67c4d232009-02-23 13:16:07 +00002607 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002608 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002609 phys_offset, region_offset);
2610 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002611 }
2612 }
2613 }
pbrook8da3ff12008-12-01 18:59:50 +00002614 region_offset += TARGET_PAGE_SIZE;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002615 addr += TARGET_PAGE_SIZE;
2616 } while (addr != end_addr);
ths3b46e622007-09-17 08:09:54 +00002617
bellard9d420372006-06-25 22:25:22 +00002618 /* since each CPU stores ram addresses in its TLB cache, we must
2619 reset the modified entries */
2620 /* XXX: slow ! */
2621 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2622 tlb_flush(env, 1);
2623 }
bellard33417e72003-08-10 21:47:01 +00002624}
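
#if 0
/* Illustrative sketch: how a board model's boot ROM could be described as a
   MemoryRegionSection and registered through the function above.  The name,
   address and size are hypothetical; callers normally reach this code via
   the memory API rather than invoking it directly. */
static void example_register_boot_rom(MemoryRegion *rom_mr)
{
    MemoryRegionSection section = {
        .mr = rom_mr,                              /* RAM-backed region */
        .offset_within_address_space = 0xfffc0000, /* hypothetical base */
        .offset_within_region = 0,
        .size = 256 * 1024,
    };

    /* readable but read-only: writes get routed to io_mem_rom */
    cpu_register_physical_memory_log(&section, true, true);
}
#endif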

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't reliably populate all physical pages when
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
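
/* Example (illustrative): with existing blocks covering [0, 4M) and
   [8M, 12M), a request for 2M sees two candidate gaps, the 4M hole at
   offset 4M and the unbounded space above 12M; the loop keeps the smallest
   gap that still fits, so the new block is placed at offset 4M. */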

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller
               than a system-defined value, which is at least 256GB.
               Larger systems have larger values.  We put the guest between
               the end of the data segment (system break) and this value.
               We use 32GB as a base to have enough room for the system
               break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
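
#if 0
/* Illustrative sketch: allocating guest RAM for a device, e.g. video
   memory.  The size and the MemoryRegion are hypothetical; in practice the
   region is normally set up through the memory API, which performs this
   allocation itself. */
static ram_addr_t example_alloc_vram(MemoryRegion *mr)
{
    return qemu_ram_alloc(16 * 1024 * 1024, mr);
}
#endif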

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
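
#if 0
/* Illustrative sketch: zeroing a stretch of guest RAM through the host
   mapping.  addr and len are hypothetical and must stay within a single
   RAMBlock; qemu_put_ram_ptr() is paired with the lookup, mirroring how
   cpu_physical_memory_rw() below releases the pointer after use. */
static void example_zero_ram(ram_addr_t addr, size_t len)
{
    uint8_t *host = qemu_get_ram_ptr(addr);

    memset(host, 0, len);
    qemu_put_ram_ptr(host);
}
#endif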

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoids reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest RAM.  Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
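
#if 0
/* Illustrative sketch: the two translations are inverses for any offset
   inside a mapped RAMBlock. */
static void example_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);

    assert(qemu_ram_addr_from_host_nofail(host) == addr);
}
#endif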

static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1: stb_phys(addr, val); break;
    case 2: stw_phys(addr, val); break;
    case 4: stl_phys(addr, val); break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read(idx, addr, len);
}

static void subpage_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write(idx, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        memory = io_mem_subpage_ram.ram_addr;
    }
    memory &= IO_MEM_NB_ENTRIES - 1;
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}

static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
    subpage_memory = mmio->iomem.ram_addr;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
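
/* Example (illustrative): to split one 4 KiB page between two devices, a
   caller could register two byte ranges on the same subpage, e.g.
   subpage_register(mmio, 0x000, 0x7ff, dev_a_index, 0) and
   subpage_register(mmio, 0x800, 0xfff, dev_b_index, 0x800), where
   dev_a_index and dev_b_index stand for previously registered io_mem
   indices (hypothetical); subpage_read/subpage_write then dispatch per
   byte offset through sub_io_index[]. */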

static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
    return -1;
}

/* Register a MemoryRegion as an io zone backing one slot of the
   io_mem_region table.  If io_index is non-zero, the corresponding io zone
   is modified.  If it is zero, a new io zone is allocated.  The return
   value can be used with cpu_register_physical_memory().  (-1) is returned
   on error. */
static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
{
    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    io_mem_region[io_index] = mr;

    return io_index;
}

int cpu_register_io_memory(MemoryRegion *mr)
{
    return cpu_register_io_memory_fixed(0, mr);
}
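
#if 0
/* Illustrative sketch: registering an io MemoryRegion and later releasing
   its slot.  The region is hypothetical and must already have been set up
   with memory_region_init_io(). */
static void example_io_lifetime(MemoryRegion *mr)
{
    int io_index = cpu_register_io_memory(mr);

    assert(io_index >= 0);
    cpu_unregister_io_memory(io_index);
}
#endif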

void cpu_unregister_io_memory(int io_index)
{
    io_mem_region[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

static void io_mem_init(void)
{
    int i;

    /* Must be first: */
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    assert(io_mem_ram.ram_addr == 0);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
                target_phys_addr_t addr1;
                io_index = pd & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(io_index, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(io_index, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(io_index, addr1, val, 1);
                    l = 1;
                }
            } else {
                ram_addr_t addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!is_ram_rom_romd(pd)) {
                target_phys_addr_t addr1;
                /* I/O case */
                io_index = pd & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(io_index, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(io_index, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(io_index, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if (!is_ram_rom_romd(pd)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
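
#if 0
/* Illustrative sketch (all names hypothetical): a device that could not map
   a region registers a callback, which fires once the bounce buffer is
   released and clients are notified, and then retries the transfer. */
static void example_map_retry_cb(void *opaque)
{
    ExampleDMAState *s = opaque;    /* hypothetical device state */

    example_start_dma(s);           /* hypothetical retry entry point */
}

static void example_map_or_defer(ExampleDMAState *s)
{
    if (!example_try_map(s)) {      /* hypothetical mapping attempt */
        cpu_register_map_client(s, example_map_retry_cb);
    }
}
#endif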
3726
aliguori6d16c2f2009-01-22 16:59:11 +00003727/* Map a physical memory region into a host virtual address.
3728 * May map a subset of the requested range, given by and returned in *plen.
3729 * May return NULL if resources needed to perform the mapping are exhausted.
3730 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003731 * Use cpu_register_map_client() to know when retrying the map operation is
3732 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003733 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003734void *cpu_physical_memory_map(target_phys_addr_t addr,
3735 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003736 int is_write)
3737{
Anthony Liguoric227f092009-10-01 16:12:16 -05003738 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003739 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003740 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003741 target_phys_addr_t page;
aliguori6d16c2f2009-01-22 16:59:11 +00003742 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003743 PhysPageDesc p;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003744 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003745 ram_addr_t rlen;
3746 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003747
3748 while (len > 0) {
3749 page = addr & TARGET_PAGE_MASK;
3750 l = (page + TARGET_PAGE_SIZE) - addr;
3751 if (l > len)
3752 l = len;
3753 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003754 pd = p.phys_offset;
aliguori6d16c2f2009-01-22 16:59:11 +00003755
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003756 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003757 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003758 break;
3759 }
3760 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3761 bounce.addr = addr;
3762 bounce.len = l;
3763 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02003764 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003765 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003766
3767 *plen = l;
3768 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00003769 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003770 if (!todo) {
3771 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3772 }
aliguori6d16c2f2009-01-22 16:59:11 +00003773
3774 len -= l;
3775 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003776 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00003777 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003778 rlen = todo;
3779 ret = qemu_ram_ptr_length(raddr, &rlen);
3780 *plen = rlen;
3781 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003782}
3783
3784/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3785 * Will also mark the memory as dirty if is_write == 1. access_len gives
3786 * the amount of memory that was actually read or written by the caller.
3787 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003788void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3789 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003790{
3791 if (buffer != bounce.buffer) {
3792 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003793 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003794 while (access_len) {
3795 unsigned l;
3796 l = TARGET_PAGE_SIZE;
3797 if (l > access_len)
3798 l = access_len;
3799 if (!cpu_physical_memory_is_dirty(addr1)) {
3800 /* invalidate code */
3801 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3802 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003803 cpu_physical_memory_set_dirty_flags(
3804 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori6d16c2f2009-01-22 16:59:11 +00003805 }
3806 addr1 += l;
3807 access_len -= l;
3808 }
3809 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003810 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003811 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003812 }
aliguori6d16c2f2009-01-22 16:59:11 +00003813 return;
3814 }
3815 if (is_write) {
3816 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3817 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003818 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003819 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00003820 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003821}
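
/* Typical usage of the map/unmap pair, shown here as an illustrative sketch
 * of how a device model might perform a DMA write into guest memory.
 * "dma_fill" and "retry_dma" are hypothetical stand-ins for device-specific
 * code, not functions defined anywhere in this file:
 *
 *     target_phys_addr_t plen = size;
 *     void *host = cpu_physical_memory_map(gpa, &plen, 1);
 *     if (host) {
 *         dma_fill(host, plen);        // produce up to plen bytes
 *         cpu_physical_memory_unmap(host, plen, 1, plen);
 *     } else {
 *         // bounce buffer busy: register to be notified when a retry
 *         // is likely to succeed
 *         cpu_register_map_client(opaque, retry_dma);
 *     }
 */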

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

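/* Like ldl_phys() above, each of the remaining load/store families comes in
   three flavours: target-native endianness (no suffix), explicit little
   endian (_le_) and explicit big endian (_be_), all funnelled through one
   *_internal helper that byte-swaps only when the requested endianness
   differs from the target's. */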
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(io_index, addr, 4) << 32;
        val |= io_mem_read(io_index, addr + 4, 4);
#else
        val = io_mem_read(io_index, addr, 4);
        val |= io_mem_read(io_index, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. This is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

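/* warning: addr must be aligned.  As with stl_phys_notdirty(), the dirty
   bits are not updated and translated code on the page is not invalidated. */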
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(io_index, addr, val >> 32, 4);
        io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(io_index, addr, (uint32_t)val, 4);
        io_mem_write(io_index, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(io_index, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

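/* Print statistics about the translation buffer and the generated code
   (e.g. for the monitor's "info jit" command). */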
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
        && !is_romd(pd)) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

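/* Instantiate the code-access soft-MMU load helpers (the "_cmmu" variants,
   used when the translator fetches guest code) for 1-, 2-, 4- and 8-byte
   accesses; SHIFT is log2 of the access size. */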
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
4437#endif