/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                            \
    __attribute__((__section__(".gen_code")))       \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                            \
    __attribute__((aligned (16)))
#else
#define code_gen_section                            \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self-modifying code handling, we count the
       code-invalidating writes to a given page; once the count reaches
       SMC_BITMAP_USE_THRESHOLD, a bitmap of the page's code bytes is built */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

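/* A worked example of the geometry above, with illustrative values
   (the real widths depend on the configured target): assume
   L1_MAP_ADDR_SPACE_BITS == 36 and TARGET_PAGE_BITS == 12.  Then:
     remaining bits = 36 - 12 = 24
     V_L1_BITS_REM  = 24 % 10 = 4   (not < 4, so no widening)
     V_L1_BITS      = 4, so V_L1_SIZE = 16 entries in l1_map
     V_L1_SHIFT     = 36 - 12 - 4 = 20, i.e. two 10-bit levels below L1
   A page index is thus consumed as 4 bits (L1) + 10 bits + 10 bits. */
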
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
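
/* map_exec() widens [addr, addr + size) to host page boundaries before
   changing protections.  With a hypothetical 4 KiB host page, addr =
   0x12345 and size = 0x100 give start = 0x12000 and end = 0x13000, so
   mprotect() covers the whole page even though the request touches
   only part of it. */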

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

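/* The fscanf() format above, "%lx-%lx %*[^\n]\n", extracts the start
   and end addresses from map entries of the form
       00400000-0040c000 r-xp 00000000 08:01 123456 /bin/cat
   (a made-up sample line) and discards the rest.  Every host mapping
   visible to the guest is marked PAGE_RESERVED so that guest
   allocations steer clear of it. */
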
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

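/* A sketch of how page_find_alloc() consumes an index, reusing the
   illustrative 4/10/10 split from above (the actual widths are
   configuration dependent):

       lp = l1_map + ((index >> 20) & 15);           L1: top 4 bits
       lp = (void **)*lp + ((index >> 10) & 1023);   one 10-bit level
       pd = *lp;  return pd + (index & 1023);        bottom PageDesc[] slot

   Intermediate tables are allocated lazily; with alloc == 0 a missing
   level simply yields NULL. */
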
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;
        int first_index = index & ~(L2_SIZE - 1);

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = io_mem_unassigned.ram_addr;
            pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
{
    PhysPageDesc *p = phys_page_find_alloc(index, 0);

    if (p) {
        return *p;
    } else {
        return (PhysPageDesc) {
            .phys_offset = io_mem_unassigned.ram_addr,
            .region_offset = index << TARGET_PAGE_BITS,
        };
    }
}
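
/* Returning an io_mem_unassigned descriptor for pages that were never
   registered means callers of phys_page_find() need no NULL check: an
   untouched physical page simply behaves as unassigned I/O, with
   region_offset reconstructed from the index. */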
455
Anthony Liguoric227f092009-10-01 16:12:16 -0500456static void tlb_protect_code(ram_addr_t ram_addr);
457static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +0000458 target_ulong vaddr);
pbrookc8a706f2008-06-02 16:16:42 +0000459#define mmap_lock() do { } while(0)
460#define mmap_unlock() do { } while(0)
bellard9fa3e852004-01-04 18:06:42 +0000461#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000462
bellard43694152008-05-29 09:35:57 +0000463#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
464
465#if defined(CONFIG_USER_ONLY)
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100466/* Currently it is not recommended to allocate big chunks of data in
bellard43694152008-05-29 09:35:57 +0000467 user mode. It will change when a dedicated libc will be used */
468#define USE_STATIC_CODE_GEN_BUFFER
469#endif
470
471#ifdef USE_STATIC_CODE_GEN_BUFFER
Aurelien Jarnoebf50fb2010-03-29 02:12:51 +0200472static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
473 __attribute__((aligned (CODE_GEN_ALIGN)));
bellard43694152008-05-29 09:35:57 +0000474#endif
475
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

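/* The sizing above leaves TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes of
   slack below the end of the buffer (code_gen_buffer_max_size), so
   tb_alloc() can refuse a new TB before generation could run off the
   end, and it provisions tbs[] for size / CODE_GEN_AVG_BLOCK_SIZE
   blocks, an average-case estimate rather than a hard per-TB bound. */
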
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

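/* TBs are bump-allocated: tbs[] grows with nb_tbs and code_gen_ptr
   only ever advances, so tb_free() can reclaim space only when the TB
   being freed is the most recently generated one.  Older TBs stay in
   place until the next full tb_flush(). */
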
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

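/* The "& 3" / "& ~3" dance works because TranslationBlock pointers
   are at least 4-byte aligned, leaving the two low bits free.  In the
   per-page lists they record which of the TB's (up to two) pages a
   link belongs to:

       p->first_tb = (TranslationBlock *)((long)tb | n);   n is 0 or 1
       n1 = (long)tb1 & 3;                                 recover n
       tb1 = (TranslationBlock *)((long)tb1 & ~3);         real pointer

   In the jump lists the tag value 2 marks the circular list head (see
   the jmp_first initialization in tb_link_page() below). */
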
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

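/* tb_phys_invalidate() has to unlink a TB from everything that can
   reach it: the physical-PC hash table, the per-page TB lists (both
   pages for a TB straddling a page boundary), each CPU's tb_jmp_cache,
   its own two outgoing jump list entries, and finally every TB still
   chained to it, whose direct jumps are reset so they fall back to the
   slow lookup path. */
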
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

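/* A worked example for set_bits(): set_bits(tab, 3, 7) must set bits
   3..9.  Since start & ~7 == 0 and end & ~7 == 8 differ, the general
   branch runs:
       *tab++ |= 0xff << 3;            sets bits 3..7 of byte 0 (0xf8)
       start = (3 + 8) & ~7;           8, so the while loop is skipped
       *tab  |= ~(0xff << (10 & 7));   sets bits 8..9 in byte 1 (0x03) */
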
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

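/* A TB's code may cross a page boundary, which is why tb_gen_code()
   computes phys_page2 for the page holding the last byte and why a TB
   carries two page_addr entries; phys_page2 stays -1 in the common
   single-page case. */
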
/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end). NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

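/* On the TARGET_HAS_PRECISE_SMC path above, a store landing in the TB
   that issued it restores the CPU state to the faulting instruction,
   invalidates the TB, regenerates a block containing just that one
   instruction (the cflags value 1 requests a one-instruction TB), and
   restarts, so a write can never invalidate code it is still
   executing. */
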
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

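/* The "len <= 8 and start a multiple of len" contract guarantees the
   tested run of bits never crosses a byte of code_bitmap, so loading a
   single byte and masking with ((1 << len) - 1) is enough to decide
   whether the write overlaps translated code. */
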
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

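/* Two protection strategies coexist in tb_alloc_page(): user mode
   mprotect()s the whole host page read-only so that a guest store
   faults, while system mode calls tlb_protect_code() to write-protect
   the page through the TLB.  Both funnel self-modifying writes into
   the invalidation paths above. */
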
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

bellarda513fe12003-05-27 23:29:48 +00001325/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1326 tb[1].tc_ptr. Return NULL if not found */
1327TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1328{
1329 int m_min, m_max, m;
1330 unsigned long v;
1331 TranslationBlock *tb;
1332
1333 if (nb_tbs <= 0)
1334 return NULL;
1335 if (tc_ptr < (unsigned long)code_gen_buffer ||
1336 tc_ptr >= (unsigned long)code_gen_ptr)
1337 return NULL;
1338 /* binary search (cf Knuth) */
1339 m_min = 0;
1340 m_max = nb_tbs - 1;
1341 while (m_min <= m_max) {
1342 m = (m_min + m_max) >> 1;
1343 tb = &tbs[m];
1344 v = (unsigned long)tb->tc_ptr;
1345 if (v == tc_ptr)
1346 return tb;
1347 else if (tc_ptr < v) {
1348 m_max = m - 1;
1349 } else {
1350 m_min = m + 1;
1351 }
ths5fafdf22007-09-16 21:08:06 +00001352 }
bellarda513fe12003-05-27 23:29:48 +00001353 return &tbs[m_max];
1354}
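
/* Usage sketch (illustrative, not from the original file): fault handling
   maps a host PC taken from a signal frame back to the TB that generated
   it, then rolls the CPU state back. "host_pc" is an assumed name, and the
   cpu_restore_state() signature shown is the one this era of the tree is
   believed to use:

       TranslationBlock *tb = tb_find_pc((unsigned long)host_pc);
       if (tb) {
           cpu_restore_state(tb, env, (unsigned long)host_pc);
       }
*/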
bellard75012672003-06-21 13:11:07 +00001355
bellardea041c02003-06-25 16:16:50 +00001356static void tb_reset_jump_recursive(TranslationBlock *tb);
1357
1358static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1359{
1360 TranslationBlock *tb1, *tb_next, **ptb;
1361 unsigned int n1;
1362
1363 tb1 = tb->jmp_next[n];
1364 if (tb1 != NULL) {
1365 /* find head of list */
1366 for(;;) {
1367 n1 = (long)tb1 & 3;
1368 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1369 if (n1 == 2)
1370 break;
1371 tb1 = tb1->jmp_next[n1];
1372 }
1373 /* we are now sure that tb jumps to tb1 */
1374 tb_next = tb1;
1375
1376 /* remove tb from the jmp_first list */
1377 ptb = &tb_next->jmp_first;
1378 for(;;) {
1379 tb1 = *ptb;
1380 n1 = (long)tb1 & 3;
1381 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1382 if (n1 == n && tb1 == tb)
1383 break;
1384 ptb = &tb1->jmp_next[n1];
1385 }
1386 *ptb = tb->jmp_next[n];
1387 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001388
bellardea041c02003-06-25 16:16:50 +00001389 /* suppress the jump to next tb in generated code */
1390 tb_reset_jump(tb, n);
1391
bellard01243112004-01-04 15:48:17 +00001392 /* suppress jumps in the tb to which we could have jumped */
bellardea041c02003-06-25 16:16:50 +00001393 tb_reset_jump_recursive(tb_next);
1394 }
1395}
1396
1397static void tb_reset_jump_recursive(TranslationBlock *tb)
1398{
1399 tb_reset_jump_recursive2(tb, 0);
1400 tb_reset_jump_recursive2(tb, 1);
1401}
1402
bellard1fddef42005-04-17 19:16:13 +00001403#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001404#if defined(CONFIG_USER_ONLY)
1405static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1406{
1407 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1408}
1409#else
bellardd720b932004-04-25 17:57:43 +00001410static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1411{
Anthony Liguoric227f092009-10-01 16:12:16 -05001412 target_phys_addr_t addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00001413 target_ulong pd;
Anthony Liguoric227f092009-10-01 16:12:16 -05001414 ram_addr_t ram_addr;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001415 PhysPageDesc p;
bellardd720b932004-04-25 17:57:43 +00001416
pbrookc2f07f82006-04-08 17:14:56 +00001417 addr = cpu_get_phys_page_debug(env, pc);
1418 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001419 pd = p.phys_offset;
pbrookc2f07f82006-04-08 17:14:56 +00001420 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001421 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001422}
bellardc27004e2005-01-03 23:35:10 +00001423#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001424#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001425
Paul Brookc527ee82010-03-01 03:31:14 +00001426#if defined(CONFIG_USER_ONLY)
1427void cpu_watchpoint_remove_all(CPUState *env, int mask)
1428
1429{
1430}
1431
1432int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1433 int flags, CPUWatchpoint **watchpoint)
1434{
1435 return -ENOSYS;
1436}
1437#else
pbrook6658ffb2007-03-16 23:58:11 +00001438/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001439int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1440 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001441{
aliguorib4051332008-11-18 20:14:20 +00001442 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001443 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001444
aliguorib4051332008-11-18 20:14:20 +00001445 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
Max Filippov0dc23822012-01-29 03:15:23 +04001446 if ((len & (len - 1)) || (addr & ~len_mask) ||
1447 len == 0 || len > TARGET_PAGE_SIZE) {
aliguorib4051332008-11-18 20:14:20 +00001448 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1449 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1450 return -EINVAL;
1451 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001452 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001453
aliguoria1d1bb32008-11-18 20:07:32 +00001454 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001455 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001456 wp->flags = flags;
1457
aliguori2dc9f412008-11-18 20:56:59 +00001458 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001459 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001460 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001461 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001462 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001463
pbrook6658ffb2007-03-16 23:58:11 +00001464 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001465
1466 if (watchpoint)
1467 *watchpoint = wp;
1468 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001469}
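
/* Usage sketch (illustrative, not part of the original file): a debug
   stub watching 4 bytes of guest memory for writes could pair the insert
   and remove-by-ref calls like this; "addr" must be 4-byte aligned or the
   sanity check above rejects it:

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4,
                                 BP_GDB | BP_MEM_WRITE, &wp) == 0) {
           ... run, handle watchpoint hits ...
           cpu_watchpoint_remove_by_ref(env, wp);
       }
*/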
1470
aliguoria1d1bb32008-11-18 20:07:32 +00001471/* Remove a specific watchpoint. */
1472int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1473 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001474{
aliguorib4051332008-11-18 20:14:20 +00001475 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001476 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001477
Blue Swirl72cf2d42009-09-12 07:36:22 +00001478 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001479 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001480 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001481 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001482 return 0;
1483 }
1484 }
aliguoria1d1bb32008-11-18 20:07:32 +00001485 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001486}
1487
aliguoria1d1bb32008-11-18 20:07:32 +00001488/* Remove a specific watchpoint by reference. */
1489void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1490{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001491 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001492
aliguoria1d1bb32008-11-18 20:07:32 +00001493 tlb_flush_page(env, watchpoint->vaddr);
1494
Anthony Liguori7267c092011-08-20 22:09:37 -05001495 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001496}
1497
aliguoria1d1bb32008-11-18 20:07:32 +00001498/* Remove all matching watchpoints. */
1499void cpu_watchpoint_remove_all(CPUState *env, int mask)
1500{
aliguoric0ce9982008-11-25 22:13:57 +00001501 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001502
Blue Swirl72cf2d42009-09-12 07:36:22 +00001503 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001504 if (wp->flags & mask)
1505 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001506 }
aliguoria1d1bb32008-11-18 20:07:32 +00001507}
Paul Brookc527ee82010-03-01 03:31:14 +00001508#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001509
1510/* Add a breakpoint. */
1511int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1512 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001513{
bellard1fddef42005-04-17 19:16:13 +00001514#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001515 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001516
Anthony Liguori7267c092011-08-20 22:09:37 -05001517 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001518
1519 bp->pc = pc;
1520 bp->flags = flags;
1521
aliguori2dc9f412008-11-18 20:56:59 +00001522 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001523 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001524 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001525 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001526 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001527
1528 breakpoint_invalidate(env, pc);
1529
1530 if (breakpoint)
1531 *breakpoint = bp;
1532 return 0;
1533#else
1534 return -ENOSYS;
1535#endif
1536}
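
/* Usage sketch (illustrative): inserting and later clearing a breakpoint
   from a debug stub; note that cpu_breakpoint_remove() matches on both pc
   and the exact flags used at insertion time:

       CPUBreakpoint *bp;
       if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
           ...
           cpu_breakpoint_remove(env, pc, BP_GDB);
       }
*/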
1537
1538/* Remove a specific breakpoint. */
1539int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1540{
1541#if defined(TARGET_HAS_ICE)
1542 CPUBreakpoint *bp;
1543
Blue Swirl72cf2d42009-09-12 07:36:22 +00001544 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001545 if (bp->pc == pc && bp->flags == flags) {
1546 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001547 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001548 }
bellard4c3a88a2003-07-26 12:06:08 +00001549 }
aliguoria1d1bb32008-11-18 20:07:32 +00001550 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001551#else
aliguoria1d1bb32008-11-18 20:07:32 +00001552 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001553#endif
1554}
1555
aliguoria1d1bb32008-11-18 20:07:32 +00001556/* Remove a specific breakpoint by reference. */
1557void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001558{
bellard1fddef42005-04-17 19:16:13 +00001559#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001560 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001561
aliguoria1d1bb32008-11-18 20:07:32 +00001562 breakpoint_invalidate(env, breakpoint->pc);
1563
Anthony Liguori7267c092011-08-20 22:09:37 -05001564 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001565#endif
1566}
1567
1568/* Remove all matching breakpoints. */
1569void cpu_breakpoint_remove_all(CPUState *env, int mask)
1570{
1571#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001572 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001573
Blue Swirl72cf2d42009-09-12 07:36:22 +00001574 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001575 if (bp->flags & mask)
1576 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001577 }
bellard4c3a88a2003-07-26 12:06:08 +00001578#endif
1579}
1580
bellardc33a3462003-07-29 20:50:33 +00001581/* enable or disable single-step mode. EXCP_DEBUG is returned by the
1582 CPU loop after each instruction */
1583void cpu_single_step(CPUState *env, int enabled)
1584{
bellard1fddef42005-04-17 19:16:13 +00001585#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001586 if (env->singlestep_enabled != enabled) {
1587 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001588 if (kvm_enabled())
1589 kvm_update_guest_debug(env, 0);
1590 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001591 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001592 /* XXX: only flush what is necessary */
1593 tb_flush(env);
1594 }
bellardc33a3462003-07-29 20:50:33 +00001595 }
1596#endif
1597}
1598
bellard34865132003-10-05 14:28:56 +00001599/* enable or disable low-level logging */
1600void cpu_set_log(int log_flags)
1601{
1602 loglevel = log_flags;
1603 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001604 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001605 if (!logfile) {
1606 perror(logfilename);
1607 _exit(1);
1608 }
bellard9fa3e852004-01-04 18:06:42 +00001609#if !defined(CONFIG_SOFTMMU)
1610 /* avoid glibc's internal use of mmap() by setting a buffer "by hand" */
1611 {
blueswir1b55266b2008-09-20 08:07:15 +00001612 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001613 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1614 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001615#elif defined(_WIN32)
1616 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1617 setvbuf(logfile, NULL, _IONBF, 0);
1618#else
bellard34865132003-10-05 14:28:56 +00001619 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001620#endif
pbrooke735b912007-06-30 13:53:24 +00001621 log_append = 1;
1622 }
1623 if (!loglevel && logfile) {
1624 fclose(logfile);
1625 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001626 }
1627}
1628
1629void cpu_set_log_filename(const char *filename)
1630{
1631 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001632 if (logfile) {
1633 fclose(logfile);
1634 logfile = NULL;
1635 }
1636 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001637}
bellardc33a3462003-07-29 20:50:33 +00001638
aurel323098dba2009-03-07 21:28:24 +00001639static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001640{
pbrookd5975362008-06-07 20:50:51 +00001641 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1642 problem and hope the cpu will stop of its own accord. For userspace
1643 emulation this often isn't actually as bad as it sounds. Often
1644 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001645 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001646 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001647
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001648 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001649 tb = env->current_tb;
1650 /* if the cpu is currently executing code, we must unlink it and
1651 all the potentially executing TBs */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001652 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001653 env->current_tb = NULL;
1654 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001655 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001656 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001657}
1658
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001659#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001660/* mask must never be zero, except for A20 change call */
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001661static void tcg_handle_interrupt(CPUState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001662{
1663 int old_mask;
1664
1665 old_mask = env->interrupt_request;
1666 env->interrupt_request |= mask;
1667
aliguori8edac962009-04-24 18:03:45 +00001668 /*
1669 * If called from iothread context, wake the target cpu in
1670 * case it's halted.
1671 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001672 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001673 qemu_cpu_kick(env);
1674 return;
1675 }
aliguori8edac962009-04-24 18:03:45 +00001676
pbrook2e70f6e2008-06-29 01:03:05 +00001677 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001678 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001679 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001680 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001681 cpu_abort(env, "Raised interrupt while not in I/O function");
1682 }
pbrook2e70f6e2008-06-29 01:03:05 +00001683 } else {
aurel323098dba2009-03-07 21:28:24 +00001684 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001685 }
1686}
1687
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001688CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1689
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001690#else /* CONFIG_USER_ONLY */
1691
1692void cpu_interrupt(CPUState *env, int mask)
1693{
1694 env->interrupt_request |= mask;
1695 cpu_unlink_tb(env);
1696}
1697#endif /* CONFIG_USER_ONLY */
1698
bellardb54ad042004-05-20 13:42:52 +00001699void cpu_reset_interrupt(CPUState *env, int mask)
1700{
1701 env->interrupt_request &= ~mask;
1702}
1703
aurel323098dba2009-03-07 21:28:24 +00001704void cpu_exit(CPUState *env)
1705{
1706 env->exit_request = 1;
1707 cpu_unlink_tb(env);
1708}
1709
blueswir1c7cd6a32008-10-02 18:27:46 +00001710const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001711 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001712 "show generated host assembly code for each compiled TB" },
1713 { CPU_LOG_TB_IN_ASM, "in_asm",
1714 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001715 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001716 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001717 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001718 "show micro ops "
1719#ifdef TARGET_I386
1720 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001721#endif
blueswir1e01a1152008-03-14 17:37:11 +00001722 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001723 { CPU_LOG_INT, "int",
1724 "show interrupts/exceptions in short format" },
1725 { CPU_LOG_EXEC, "exec",
1726 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001727 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001728 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001729#ifdef TARGET_I386
1730 { CPU_LOG_PCALL, "pcall",
1731 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001732 { CPU_LOG_RESET, "cpu_reset",
1733 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001734#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001735#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001736 { CPU_LOG_IOPORT, "ioport",
1737 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001738#endif
bellardf193c792004-03-21 17:06:25 +00001739 { 0, NULL, NULL },
1740};
1741
1742static int cmp1(const char *s1, int n, const char *s2)
1743{
1744 if (strlen(s2) != n)
1745 return 0;
1746 return memcmp(s1, s2, n) == 0;
1747}
ths3b46e622007-09-17 08:09:54 +00001748
bellardf193c792004-03-21 17:06:25 +00001749/* takes a comma-separated list of log masks. Returns 0 on error. */
1750int cpu_str_to_log_mask(const char *str)
1751{
blueswir1c7cd6a32008-10-02 18:27:46 +00001752 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001753 int mask;
1754 const char *p, *p1;
1755
1756 p = str;
1757 mask = 0;
1758 for(;;) {
1759 p1 = strchr(p, ',');
1760 if (!p1)
1761 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001762 if (cmp1(p, p1 - p, "all")) {
1763 for(item = cpu_log_items; item->mask != 0; item++) {
1764 mask |= item->mask;
1765 }
1766 } else {
1767 for(item = cpu_log_items; item->mask != 0; item++) {
1768 if (cmp1(p, p1 - p, item->name))
1769 goto found;
1770 }
1771 return 0;
bellardf193c792004-03-21 17:06:25 +00001772 }
bellardf193c792004-03-21 17:06:25 +00001773 found:
1774 mask |= item->mask;
1775 if (*p1 != ',')
1776 break;
1777 p = p1 + 1;
1778 }
1779 return mask;
1780}
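
/* Usage sketch (illustrative, not part of the original file): this is how
   a "-d" style command line option is typically wired to the logging
   machinery above:

       int mask = cpu_str_to_log_mask("in_asm,op,int");
       if (!mask) {
           ... reject the option: an item was not recognized ...
       } else {
           cpu_set_log(mask);
       }
*/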
bellardea041c02003-06-25 16:16:50 +00001781
bellard75012672003-06-21 13:11:07 +00001782void cpu_abort(CPUState *env, const char *fmt, ...)
1783{
1784 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001785 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001786
1787 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001788 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001789 fprintf(stderr, "qemu: fatal: ");
1790 vfprintf(stderr, fmt, ap);
1791 fprintf(stderr, "\n");
1792#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001793 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1794#else
1795 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001796#endif
aliguori93fcfe32009-01-15 22:34:14 +00001797 if (qemu_log_enabled()) {
1798 qemu_log("qemu: fatal: ");
1799 qemu_log_vprintf(fmt, ap2);
1800 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001801#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001802 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001803#else
aliguori93fcfe32009-01-15 22:34:14 +00001804 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001805#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001806 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001807 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001808 }
pbrook493ae1f2007-11-23 16:53:59 +00001809 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001810 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001811#if defined(CONFIG_USER_ONLY)
1812 {
1813 struct sigaction act;
1814 sigfillset(&act.sa_mask);
1815 act.sa_handler = SIG_DFL;
1816 sigaction(SIGABRT, &act, NULL);
1817 }
1818#endif
bellard75012672003-06-21 13:11:07 +00001819 abort();
1820}
1821
thsc5be9f02007-02-28 20:20:53 +00001822CPUState *cpu_copy(CPUState *env)
1823{
ths01ba9812007-12-09 02:22:57 +00001824 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001825 CPUState *next_cpu = new_env->next_cpu;
1826 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001827#if defined(TARGET_HAS_ICE)
1828 CPUBreakpoint *bp;
1829 CPUWatchpoint *wp;
1830#endif
1831
thsc5be9f02007-02-28 20:20:53 +00001832 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001833
1834 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001835 new_env->next_cpu = next_cpu;
1836 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001837
1838 /* Clone all break/watchpoints.
1839 Note: Once we support ptrace with hw-debug register access, make sure
1840 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001841 QTAILQ_INIT(&env->breakpoints);
1842 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001843#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001844 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001845 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1846 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001847 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001848 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1849 wp->flags, NULL);
1850 }
1851#endif
1852
thsc5be9f02007-02-28 20:20:53 +00001853 return new_env;
1854}
1855
bellard01243112004-01-04 15:48:17 +00001856#if !defined(CONFIG_USER_ONLY)
1857
edgar_igl5c751e92008-05-06 08:44:21 +00001858static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1859{
1860 unsigned int i;
1861
1862 /* Discard jump cache entries for any tb that might overlap
1863 the flushed page. */
1864 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1865 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001866 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001867
1868 i = tb_jmp_cache_hash_page(addr);
1869 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001870 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001871}
1872
Igor Kovalenko08738982009-07-12 02:15:40 +04001873static CPUTLBEntry s_cputlb_empty_entry = {
1874 .addr_read = -1,
1875 .addr_write = -1,
1876 .addr_code = -1,
1877 .addend = -1,
1878};
1879
Peter Maydell771124e2012-01-17 13:23:13 +00001880/* NOTE:
1881 * If flush_global is true (the usual case), flush all tlb entries.
1882 * If flush_global is false, flush (at least) all tlb entries not
1883 * marked global.
1884 *
1885 * Since QEMU doesn't currently implement a global/not-global flag
1886 * for tlb entries, at the moment tlb_flush() will also flush all
1887 * tlb entries in the flush_global == false case. This is OK because
1888 * CPU architectures generally permit an implementation to drop
1889 * entries from the TLB at any time, so flushing more entries than
1890 * required is only an efficiency issue, not a correctness issue.
1891 */
bellardee8b7022004-02-03 23:35:10 +00001892void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001893{
bellard33417e72003-08-10 21:47:01 +00001894 int i;
bellard01243112004-01-04 15:48:17 +00001895
bellard9fa3e852004-01-04 18:06:42 +00001896#if defined(DEBUG_TLB)
1897 printf("tlb_flush:\n");
1898#endif
bellard01243112004-01-04 15:48:17 +00001899 /* must reset current TB so that interrupts cannot modify the
1900 links while we are modifying them */
1901 env->current_tb = NULL;
1902
bellard33417e72003-08-10 21:47:01 +00001903 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001904 int mmu_idx;
1905 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001906 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001907 }
bellard33417e72003-08-10 21:47:01 +00001908 }
bellard9fa3e852004-01-04 18:06:42 +00001909
bellard8a40a182005-11-20 10:35:40 +00001910 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001911
Paul Brookd4c430a2010-03-17 02:14:28 +00001912 env->tlb_flush_addr = -1;
1913 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00001914 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001915}
1916
bellard274da6b2004-05-20 21:56:27 +00001917static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001918{
ths5fafdf22007-09-16 21:08:06 +00001919 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001920 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001921 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001922 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001923 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001924 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001925 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001926 }
bellard61382a52003-10-27 21:22:23 +00001927}
1928
bellard2e126692004-04-25 21:28:44 +00001929void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001930{
bellard8a40a182005-11-20 10:35:40 +00001931 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001932 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001933
bellard9fa3e852004-01-04 18:06:42 +00001934#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001935 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001936#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00001937 /* Check if we need to flush due to large pages. */
1938 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1939#if defined(DEBUG_TLB)
1940 printf("tlb_flush_page: forced full flush ("
1941 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1942 env->tlb_flush_addr, env->tlb_flush_mask);
1943#endif
1944 tlb_flush(env, 1);
1945 return;
1946 }
bellard01243112004-01-04 15:48:17 +00001947 /* must reset current TB so that interrupts cannot modify the
1948 links while we are modifying them */
1949 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00001950
bellard61382a52003-10-27 21:22:23 +00001951 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00001952 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001953 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1954 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00001955
edgar_igl5c751e92008-05-06 08:44:21 +00001956 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00001957}
1958
bellard9fa3e852004-01-04 18:06:42 +00001959/* update the TLBs so that writes to code in the virtual page 'addr'
1960 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05001961static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00001962{
ths5fafdf22007-09-16 21:08:06 +00001963 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00001964 ram_addr + TARGET_PAGE_SIZE,
1965 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00001966}
1967
bellard9fa3e852004-01-04 18:06:42 +00001968/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00001969 tested for self-modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05001970static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00001971 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00001972{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001973 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00001974}
1975
ths5fafdf22007-09-16 21:08:06 +00001976static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00001977 unsigned long start, unsigned long length)
1978{
1979 unsigned long addr;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001980 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
bellard84b7b8e2005-11-28 21:19:04 +00001981 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00001982 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00001983 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00001984 }
1985 }
1986}
1987
pbrook5579c7f2009-04-11 14:47:08 +00001988/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05001989void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00001990 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00001991{
1992 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00001993 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001994 int i;
bellard1ccde1c2004-02-06 19:46:14 +00001995
1996 start &= TARGET_PAGE_MASK;
1997 end = TARGET_PAGE_ALIGN(end);
1998
1999 length = end - start;
2000 if (length == 0)
2001 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002002 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002003
bellard1ccde1c2004-02-06 19:46:14 +00002004 /* we modify the TLB cache so that the dirty bit will be set again
2005 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002006 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02002007 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00002008 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002009 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00002010 != (end - 1) - start) {
2011 abort();
2012 }
2013
bellard6a00d602005-11-21 23:25:50 +00002014 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002015 int mmu_idx;
2016 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2017 for(i = 0; i < CPU_TLB_SIZE; i++)
2018 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2019 start1, length);
2020 }
bellard6a00d602005-11-21 23:25:50 +00002021 }
bellard1ccde1c2004-02-06 19:46:14 +00002022}
2023
aliguori74576192008-10-06 14:02:03 +00002024int cpu_physical_memory_set_dirty_tracking(int enable)
2025{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002026 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002027 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002028 return ret;
aliguori74576192008-10-06 14:02:03 +00002029}
2030
bellard3a7d9292005-08-21 09:26:42 +00002031static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2032{
Anthony Liguoric227f092009-10-01 16:12:16 -05002033 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002034 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002035
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002036 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
pbrook5579c7f2009-04-11 14:47:08 +00002037 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2038 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002039 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002040 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002041 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002042 }
2043 }
2044}
2045
2046/* update the TLB according to the current state of the dirty bits */
2047void cpu_tlb_update_dirty(CPUState *env)
2048{
2049 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002050 int mmu_idx;
2051 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2052 for(i = 0; i < CPU_TLB_SIZE; i++)
2053 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2054 }
bellard3a7d9292005-08-21 09:26:42 +00002055}
2056
pbrook0f459d12008-06-09 00:20:13 +00002057static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002058{
pbrook0f459d12008-06-09 00:20:13 +00002059 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2060 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002061}
2062
pbrook0f459d12008-06-09 00:20:13 +00002063/* update the TLB corresponding to virtual page vaddr
2064 so that it is no longer dirty */
2065static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002066{
bellard1ccde1c2004-02-06 19:46:14 +00002067 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002068 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002069
pbrook0f459d12008-06-09 00:20:13 +00002070 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002071 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002072 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2073 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002074}
2075
Paul Brookd4c430a2010-03-17 02:14:28 +00002076/* Our TLB does not support large pages, so remember the area covered by
2077 large pages and trigger a full TLB flush if these are invalidated. */
2078static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2079 target_ulong size)
2080{
2081 target_ulong mask = ~(size - 1);
2082
2083 if (env->tlb_flush_addr == (target_ulong)-1) {
2084 env->tlb_flush_addr = vaddr & mask;
2085 env->tlb_flush_mask = mask;
2086 return;
2087 }
2088 /* Extend the existing region to include the new page.
2089 This is a compromise between unnecessary flushes and the cost
2090 of maintaining a full variable size TLB. */
2091 mask &= env->tlb_flush_mask;
2092 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2093 mask <<= 1;
2094 }
2095 env->tlb_flush_addr &= mask;
2096 env->tlb_flush_mask = mask;
2097}
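
/* Worked example (illustrative): suppose a 2MB page was already recorded
   as tlb_flush_addr = 0x400000 with mask = ~(2MB - 1), and a second 2MB
   page is added at vaddr = 0x600000. The loop widens the mask once, since
   (0x400000 ^ 0x600000) & ~(4MB - 1) == 0, leaving
   tlb_flush_addr = 0x400000 and tlb_flush_mask = ~(4MB - 1), i.e. one
   region covering 0x400000..0x7fffff. */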
2098
Avi Kivity1d393fa2012-01-01 21:15:42 +02002099static bool is_ram_rom(ram_addr_t pd)
2100{
2101 pd &= ~TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002102 return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002103}
2104
Avi Kivity75c578d2012-01-02 15:40:52 +02002105static bool is_romd(ram_addr_t pd)
2106{
2107 MemoryRegion *mr;
2108
2109 pd &= ~TARGET_PAGE_MASK;
Avi Kivity11c7ef02012-01-02 17:21:07 +02002110 mr = io_mem_region[pd];
Avi Kivity75c578d2012-01-02 15:40:52 +02002111 return mr->rom_device && mr->readable;
2112}
2113
Avi Kivity1d393fa2012-01-01 21:15:42 +02002114static bool is_ram_rom_romd(ram_addr_t pd)
2115{
Avi Kivity75c578d2012-01-02 15:40:52 +02002116 return is_ram_rom(pd) || is_romd(pd);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002117}
2118
Paul Brookd4c430a2010-03-17 02:14:28 +00002119/* Add a new TLB entry. At most one entry for a given virtual address
2120 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2121 supplied size is only used by tlb_flush_page. */
2122void tlb_set_page(CPUState *env, target_ulong vaddr,
2123 target_phys_addr_t paddr, int prot,
2124 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002125{
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002126 PhysPageDesc p;
bellard4f2ac232004-04-26 19:44:02 +00002127 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00002128 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002129 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002130 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002131 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002132 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002133 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002134 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002135
Paul Brookd4c430a2010-03-17 02:14:28 +00002136 assert(size >= TARGET_PAGE_SIZE);
2137 if (size != TARGET_PAGE_SIZE) {
2138 tlb_add_large_page(env, vaddr, size);
2139 }
bellard92e873b2004-05-21 14:52:29 +00002140 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002141 pd = p.phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00002142#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002143 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2144 " prot=%x idx=%d pd=0x%08lx\n",
2145 vaddr, paddr, prot, mmu_idx, pd);
bellard9fa3e852004-01-04 18:06:42 +00002146#endif
2147
pbrook0f459d12008-06-09 00:20:13 +00002148 address = vaddr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002149 if (!is_ram_rom_romd(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002150 /* IO memory case (romd handled later) */
2151 address |= TLB_MMIO;
2152 }
pbrook5579c7f2009-04-11 14:47:08 +00002153 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002154 if (is_ram_rom(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002155 /* Normal RAM. */
2156 iotlb = pd & TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002157 if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2158 iotlb |= io_mem_notdirty.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002159 else
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002160 iotlb |= io_mem_rom.ram_addr;
pbrook0f459d12008-06-09 00:20:13 +00002161 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002162 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002163 It would be nice to pass an offset from the base address
2164 of that region. This would avoid having to special case RAM,
2165 and avoid full address decoding in every device.
2166 We can't use the high bits of pd for this because
2167 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002168 iotlb = (pd & ~TARGET_PAGE_MASK);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002169 iotlb += p.region_offset;
pbrook0f459d12008-06-09 00:20:13 +00002170 }
pbrook6658ffb2007-03-16 23:58:11 +00002171
pbrook0f459d12008-06-09 00:20:13 +00002172 code_address = address;
2173 /* Make accesses to pages with watchpoints go via the
2174 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002175 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002176 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002177 /* Avoid trapping reads of pages with a write breakpoint. */
2178 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
Avi Kivity1ec9b902012-01-02 12:47:48 +02002179 iotlb = io_mem_watch.ram_addr + paddr;
Jun Koibf298f82010-05-06 14:36:59 +09002180 address |= TLB_MMIO;
2181 break;
2182 }
pbrook6658ffb2007-03-16 23:58:11 +00002183 }
pbrook0f459d12008-06-09 00:20:13 +00002184 }
balrogd79acba2007-06-26 20:01:13 +00002185
pbrook0f459d12008-06-09 00:20:13 +00002186 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2187 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2188 te = &env->tlb_table[mmu_idx][index];
2189 te->addend = addend - vaddr;
2190 if (prot & PAGE_READ) {
2191 te->addr_read = address;
2192 } else {
2193 te->addr_read = -1;
2194 }
edgar_igl5c751e92008-05-06 08:44:21 +00002195
pbrook0f459d12008-06-09 00:20:13 +00002196 if (prot & PAGE_EXEC) {
2197 te->addr_code = code_address;
2198 } else {
2199 te->addr_code = -1;
2200 }
2201 if (prot & PAGE_WRITE) {
Avi Kivity75c578d2012-01-02 15:40:52 +02002202 if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr || is_romd(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002203 /* Write access calls the I/O callback. */
2204 te->addr_write = address | TLB_MMIO;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002205 } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
pbrook0f459d12008-06-09 00:20:13 +00002206 !cpu_physical_memory_is_dirty(pd)) {
2207 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002208 } else {
pbrook0f459d12008-06-09 00:20:13 +00002209 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002210 }
pbrook0f459d12008-06-09 00:20:13 +00002211 } else {
2212 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002213 }
bellard9fa3e852004-01-04 18:06:42 +00002214}
2215
bellard01243112004-01-04 15:48:17 +00002216#else
2217
bellardee8b7022004-02-03 23:35:10 +00002218void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002219{
2220}
2221
bellard2e126692004-04-25 21:28:44 +00002222void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002223{
2224}
2225
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002226/*
2227 * Walks guest process memory "regions" one by one
2228 * and calls callback function 'fn' for each region.
2229 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002230
2231struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002232{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002233 walk_memory_regions_fn fn;
2234 void *priv;
2235 unsigned long start;
2236 int prot;
2237};
bellard9fa3e852004-01-04 18:06:42 +00002238
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002239static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002240 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002241{
2242 if (data->start != -1ul) {
2243 int rc = data->fn(data->priv, data->start, end, data->prot);
2244 if (rc != 0) {
2245 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002246 }
bellard33417e72003-08-10 21:47:01 +00002247 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002248
2249 data->start = (new_prot ? end : -1ul);
2250 data->prot = new_prot;
2251
2252 return 0;
2253}
2254
2255static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002256 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002257{
Paul Brookb480d9b2010-03-12 23:23:29 +00002258 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002259 int i, rc;
2260
2261 if (*lp == NULL) {
2262 return walk_memory_regions_end(data, base, 0);
2263 }
2264
2265 if (level == 0) {
2266 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002267 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002268 int prot = pd[i].flags;
2269
2270 pa = base | (i << TARGET_PAGE_BITS);
2271 if (prot != data->prot) {
2272 rc = walk_memory_regions_end(data, pa, prot);
2273 if (rc != 0) {
2274 return rc;
2275 }
2276 }
2277 }
2278 } else {
2279 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002280 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002281 pa = base | ((abi_ulong)i <<
2282 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002283 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2284 if (rc != 0) {
2285 return rc;
2286 }
2287 }
2288 }
2289
2290 return 0;
2291}
2292
2293int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2294{
2295 struct walk_memory_regions_data data;
2296 unsigned long i;
2297
2298 data.fn = fn;
2299 data.priv = priv;
2300 data.start = -1ul;
2301 data.prot = 0;
2302
2303 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002304 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002305 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2306 if (rc != 0) {
2307 return rc;
2308 }
2309 }
2310
2311 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002312}
2313
Paul Brookb480d9b2010-03-12 23:23:29 +00002314static int dump_region(void *priv, abi_ulong start,
2315 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002316{
2317 FILE *f = (FILE *)priv;
2318
Paul Brookb480d9b2010-03-12 23:23:29 +00002319 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2320 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002321 start, end, end - start,
2322 ((prot & PAGE_READ) ? 'r' : '-'),
2323 ((prot & PAGE_WRITE) ? 'w' : '-'),
2324 ((prot & PAGE_EXEC) ? 'x' : '-'));
2325
2326 return (0);
2327}
2328
2329/* dump memory mappings */
2330void page_dump(FILE *f)
2331{
2332 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2333 "start", "end", "size", "prot");
2334 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002335}
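
/* Sketch (illustrative, not part of the original file): walk_memory_regions()
   can drive callbacks other than dump_region, using the same callback
   contract; e.g. totalling the executable bytes in the guest map. A
   non-zero return value would abort the walk:

       static int count_exec(void *priv, abi_ulong start,
                             abi_ulong end, unsigned long prot)
       {
           abi_ulong *total = priv;
           if (prot & PAGE_EXEC) {
               *total += end - start;
           }
           return 0;
       }

       abi_ulong total = 0;
       walk_memory_regions(&total, count_exec);
*/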
2336
pbrook53a59602006-03-25 19:31:22 +00002337int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002338{
bellard9fa3e852004-01-04 18:06:42 +00002339 PageDesc *p;
2340
2341 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002342 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002343 return 0;
2344 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002345}
2346
Richard Henderson376a7902010-03-10 15:57:04 -08002347/* Modify the flags of a page and invalidate the code if necessary.
2348 The flag PAGE_WRITE_ORG is set automatically depending
2349 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002350void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002351{
Richard Henderson376a7902010-03-10 15:57:04 -08002352 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002353
Richard Henderson376a7902010-03-10 15:57:04 -08002354 /* This function should never be called with addresses outside the
2355 guest address space. If this assert fires, it probably indicates
2356 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002357#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2358 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002359#endif
2360 assert(start < end);
2361
bellard9fa3e852004-01-04 18:06:42 +00002362 start = start & TARGET_PAGE_MASK;
2363 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002364
2365 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002366 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002367 }
2368
2369 for (addr = start, len = end - start;
2370 len != 0;
2371 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2372 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2373
2374 /* If the write protection bit is set, then we invalidate
2375 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002376 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002377 (flags & PAGE_WRITE) &&
2378 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002379 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002380 }
2381 p->flags = flags;
2382 }
bellard9fa3e852004-01-04 18:06:42 +00002383}
2384
ths3d97b402007-11-02 19:02:07 +00002385int page_check_range(target_ulong start, target_ulong len, int flags)
2386{
2387 PageDesc *p;
2388 target_ulong end;
2389 target_ulong addr;
2390
Richard Henderson376a7902010-03-10 15:57:04 -08002391 /* This function should never be called with addresses outside the
2392 guest address space. If this assert fires, it probably indicates
2393 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002394#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2395 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002396#endif
2397
Richard Henderson3e0650a2010-03-29 10:54:42 -07002398 if (len == 0) {
2399 return 0;
2400 }
Richard Henderson376a7902010-03-10 15:57:04 -08002401 if (start + len - 1 < start) {
2402 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002403 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002404 }
balrog55f280c2008-10-28 10:24:11 +00002405
ths3d97b402007-11-02 19:02:07 +00002406 end = TARGET_PAGE_ALIGN(start + len); /* must do this before we lose bits in the next step */
2407 start = start & TARGET_PAGE_MASK;
2408
Richard Henderson376a7902010-03-10 15:57:04 -08002409 for (addr = start, len = end - start;
2410 len != 0;
2411 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002412 p = page_find(addr >> TARGET_PAGE_BITS);
2413 if( !p )
2414 return -1;
2415 if( !(p->flags & PAGE_VALID) )
2416 return -1;
2417
bellarddae32702007-11-14 10:51:00 +00002418 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002419 return -1;
bellarddae32702007-11-14 10:51:00 +00002420 if (flags & PAGE_WRITE) {
2421 if (!(p->flags & PAGE_WRITE_ORG))
2422 return -1;
2423 /* unprotect the page if it was put read-only because it
2424 contains translated code */
2425 if (!(p->flags & PAGE_WRITE)) {
2426 if (!page_unprotect(addr, 0, NULL))
2427 return -1;
2428 }
2429 return 0;
2430 }
ths3d97b402007-11-02 19:02:07 +00002431 }
2432 return 0;
2433}
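
/* Usage sketch (illustrative): user-mode syscall emulation can validate a
   guest buffer before touching it; the error value returned to the guest
   here is a hypothetical choice:

       if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) != 0) {
           return -TARGET_EFAULT;
       }
*/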
2434
bellard9fa3e852004-01-04 18:06:42 +00002435/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002436 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002437int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002438{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002439 unsigned int prot;
2440 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002441 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002442
pbrookc8a706f2008-06-02 16:16:42 +00002443 /* Technically this isn't safe inside a signal handler. However we
2444 know this only ever happens in a synchronous SEGV handler, so in
2445 practice it seems to be ok. */
2446 mmap_lock();
2447
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002448 p = page_find(address >> TARGET_PAGE_BITS);
2449 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002450 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002451 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002452 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002453
bellard9fa3e852004-01-04 18:06:42 +00002454 /* if the page was really writable, then we change its
2455 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002456 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2457 host_start = address & qemu_host_page_mask;
2458 host_end = host_start + qemu_host_page_size;
2459
2460 prot = 0;
2461 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2462 p = page_find(addr >> TARGET_PAGE_BITS);
2463 p->flags |= PAGE_WRITE;
2464 prot |= p->flags;
2465
bellard9fa3e852004-01-04 18:06:42 +00002466 /* and since the content will be modified, we must invalidate
2467 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002468 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002469#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002470 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002471#endif
bellard9fa3e852004-01-04 18:06:42 +00002472 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002473 mprotect((void *)g2h(host_start), qemu_host_page_size,
2474 prot & PAGE_BITS);
2475
2476 mmap_unlock();
2477 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002478 }
pbrookc8a706f2008-06-02 16:16:42 +00002479 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002480 return 0;
2481}
2482
bellard6a00d602005-11-21 23:25:50 +00002483static inline void tlb_set_dirty(CPUState *env,
2484 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002485{
2486}
bellard9fa3e852004-01-04 18:06:42 +00002487#endif /* defined(CONFIG_USER_ONLY) */
2488
pbrooke2eef172008-06-08 01:09:01 +00002489#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002490
Paul Brookc04b2b72010-03-01 03:31:14 +00002491#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2492typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002493 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002494 target_phys_addr_t base;
Richard Hendersonf6405242010-04-22 16:47:31 -07002495 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2496 ram_addr_t region_offset[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002497} subpage_t;
2498
Anthony Liguoric227f092009-10-01 16:12:16 -05002499static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2500 ram_addr_t memory, ram_addr_t region_offset);
Richard Hendersonf6405242010-04-22 16:47:31 -07002501static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2502 ram_addr_t orig_memory,
2503 ram_addr_t region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002504#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2505 need_subpage) \
2506 do { \
2507 if (addr > start_addr) \
2508 start_addr2 = 0; \
2509 else { \
2510 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2511 if (start_addr2 > 0) \
2512 need_subpage = 1; \
2513 } \
2514 \
blueswir149e9fba2007-05-30 17:25:06 +00002515 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002516 end_addr2 = TARGET_PAGE_SIZE - 1; \
2517 else { \
2518 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2519 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2520 need_subpage = 1; \
2521 } \
2522 } while (0)
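
/*
 * Worked example (a sketch, not taken from a caller): with 4KB target
 * pages, registering orig_size 0x1000 bytes at a start_addr ending in
 * ...800 takes two iterations. First page: addr == start_addr, so
 * start_addr2 = 0x800 and end_addr2 = 0xfff, i.e. a subpage is needed
 * for the upper half. Second page: addr > start_addr, so start_addr2 = 0
 * and end_addr2 = 0x7ff, i.e. a subpage is needed for the lower half.
 */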
2523
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002524/* register physical memory.
2525 For RAM, 'size' must be a multiple of the target page size.
2526 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002527 io memory page. The address used when calling the IO function is
2528 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002529 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002530 before calculating this offset. This should not be a problem unless
2531 the low bits of start_addr and region_offset differ. */
Avi Kivitydd811242012-01-02 12:17:03 +02002532void cpu_register_physical_memory_log(MemoryRegionSection *section,
2533 bool readable, bool readonly)
bellard33417e72003-08-10 21:47:01 +00002534{
Avi Kivitydd811242012-01-02 12:17:03 +02002535 target_phys_addr_t start_addr = section->offset_within_address_space;
2536 ram_addr_t size = section->size;
2537 ram_addr_t phys_offset = section->mr->ram_addr;
2538 ram_addr_t region_offset = section->offset_within_region;
Anthony Liguoric227f092009-10-01 16:12:16 -05002539 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002540 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002541 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002542 ram_addr_t orig_size = size;
Richard Hendersonf6405242010-04-22 16:47:31 -07002543 subpage_t *subpage;
bellard33417e72003-08-10 21:47:01 +00002544
Avi Kivitydd811242012-01-02 12:17:03 +02002545 if (memory_region_is_ram(section->mr)) {
2546 phys_offset += region_offset;
2547 region_offset = 0;
2548 }
2549
Avi Kivitydd811242012-01-02 12:17:03 +02002550 if (readonly) {
2551 phys_offset |= io_mem_rom.ram_addr;
2552 }
2553
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002554 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002555
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002556 if (phys_offset == io_mem_unassigned.ram_addr) {
pbrook67c4d232009-02-23 13:16:07 +00002557 region_offset = start_addr;
2558 }
pbrook8da3ff12008-12-01 18:59:50 +00002559 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002560 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002561 end_addr = start_addr + (target_phys_addr_t)size;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002562
2563 addr = start_addr;
2564 do {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002565 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002566 if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002567 ram_addr_t orig_memory = p->phys_offset;
2568 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002569 int need_subpage = 0;
Avi Kivity11c7ef02012-01-02 17:21:07 +02002570 MemoryRegion *mr = io_mem_region[orig_memory & ~TARGET_PAGE_MASK];
blueswir1db7b5422007-05-26 17:36:03 +00002571
2572 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2573 need_subpage);
Richard Hendersonf6405242010-04-22 16:47:31 -07002574 if (need_subpage) {
Avi Kivityb3b00c72012-01-02 13:20:11 +02002575 if (!(mr->subpage)) {
blueswir1db7b5422007-05-26 17:36:03 +00002576 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002577 &p->phys_offset, orig_memory,
2578 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002579 } else {
Avi Kivitya621f382012-01-02 13:12:08 +02002580 subpage = container_of(mr, subpage_t, iomem);
blueswir1db7b5422007-05-26 17:36:03 +00002581 }
pbrook8da3ff12008-12-01 18:59:50 +00002582 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2583 region_offset);
2584 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002585 } else {
2586 p->phys_offset = phys_offset;
Avi Kivity2774c6d2012-01-01 18:24:24 +02002587 p->region_offset = region_offset;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002588 if (is_ram_rom_romd(phys_offset))
blueswir1db7b5422007-05-26 17:36:03 +00002589 phys_offset += TARGET_PAGE_SIZE;
2590 }
2591 } else {
2592 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2593 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002594 p->region_offset = region_offset;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002595 if (is_ram_rom_romd(phys_offset)) {
blueswir1db7b5422007-05-26 17:36:03 +00002596 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002597 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002598 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002599 int need_subpage = 0;
2600
2601 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2602 end_addr2, need_subpage);
2603
Richard Hendersonf6405242010-04-22 16:47:31 -07002604 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002605 subpage = subpage_init((addr & TARGET_PAGE_MASK),
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002606 &p->phys_offset,
2607 io_mem_unassigned.ram_addr,
pbrook67c4d232009-02-23 13:16:07 +00002608 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002609 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002610 phys_offset, region_offset);
2611 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002612 }
2613 }
2614 }
pbrook8da3ff12008-12-01 18:59:50 +00002615 region_offset += TARGET_PAGE_SIZE;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002616 addr += TARGET_PAGE_SIZE;
2617 } while (addr != end_addr);
ths3b46e622007-09-17 08:09:54 +00002618
bellard9d420372006-06-25 22:25:22 +00002619 /* since each CPU stores ram addresses in its TLB cache, we must
2620 reset the modified entries */
2621 /* XXX: slow ! */
2622 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2623 tlb_flush(env, 1);
2624 }
bellard33417e72003-08-10 21:47:01 +00002625}
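
/*
 * Illustrative sketch only; the section and its values below are
 * hypothetical, and "mr" is assumed to be an already-initialized RAM
 * MemoryRegion. Mapping 64KB of it at guest-physical 1MB, writable:
 *
 *     MemoryRegionSection section = {
 *         .mr = mr,
 *         .offset_within_address_space = 0x100000,
 *         .offset_within_region = 0,
 *         .size = 0x10000,
 *     };
 *     cpu_register_physical_memory_log(&section, true, false);
 */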
2626
Anthony Liguoric227f092009-10-01 16:12:16 -05002627void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002628{
2629 if (kvm_enabled())
2630 kvm_coalesce_mmio_region(addr, size);
2631}
2632
Anthony Liguoric227f092009-10-01 16:12:16 -05002633void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002634{
2635 if (kvm_enabled())
2636 kvm_uncoalesce_mmio_region(addr, size);
2637}
2638
Sheng Yang62a27442010-01-26 19:21:16 +08002639void qemu_flush_coalesced_mmio_buffer(void)
2640{
2641 if (kvm_enabled())
2642 kvm_flush_coalesced_mmio_buffer();
2643}
2644
Marcelo Tosattic9027602010-03-01 20:25:08 -03002645#if defined(__linux__) && !defined(TARGET_S390X)
2646
2647#include <sys/vfs.h>
2648
2649#define HUGETLBFS_MAGIC 0x958458f6
2650
2651static long gethugepagesize(const char *path)
2652{
2653 struct statfs fs;
2654 int ret;
2655
2656 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002657 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002658 } while (ret != 0 && errno == EINTR);
2659
2660 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002661 perror(path);
2662 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002663 }
2664
2665 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002666 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002667
2668 return fs.f_bsize;
2669}
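
/*
 * Example (a sketch; the mount point is hypothetical): for a hugetlbfs
 * mount backed by 2MB pages,
 *
 *     long sz = gethugepagesize("/dev/hugepages");    /- sz == 2097152 -/
 *
 * since statfs() reports f_type == HUGETLBFS_MAGIC and f_bsize equal to
 * the huge page size of that mount.
 */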
2670
Alex Williamson04b16652010-07-02 11:13:17 -06002671static void *file_ram_alloc(RAMBlock *block,
2672 ram_addr_t memory,
2673 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002674{
2675 char *filename;
2676 void *area;
2677 int fd;
2678#ifdef MAP_POPULATE
2679 int flags;
2680#endif
2681 unsigned long hpagesize;
2682
2683 hpagesize = gethugepagesize(path);
2684 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002685 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002686 }
2687
2688 if (memory < hpagesize) {
2689 return NULL;
2690 }
2691
2692 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2693 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2694 return NULL;
2695 }
2696
2697 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002698 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002699 }
2700
2701 fd = mkstemp(filename);
2702 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002703 perror("unable to create backing store for hugepages");
2704 free(filename);
2705 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002706 }
2707 unlink(filename);
2708 free(filename);
2709
2710 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2711
2712 /*
2713 * ftruncate is not supported by hugetlbfs in older
2714 * hosts, so don't bother bailing out on errors.
2715 * If anything goes wrong with it under other filesystems,
2716 * mmap will fail.
2717 */
2718 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002719 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002720
2721#ifdef MAP_POPULATE
2722 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2723 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2724 * to sidestep this quirk.
2725 */
2726 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2727 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2728#else
2729 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2730#endif
2731 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002732 perror("file_ram_alloc: can't mmap RAM pages");
2733 close(fd);
2734 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002735 }
Alex Williamson04b16652010-07-02 11:13:17 -06002736 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002737 return area;
2738}
2739#endif
2740
Alex Williamsond17b5282010-06-25 11:08:38 -06002741static ram_addr_t find_ram_offset(ram_addr_t size)
2742{
Alex Williamson04b16652010-07-02 11:13:17 -06002743 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002744 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002745
2746 if (QLIST_EMPTY(&ram_list.blocks))
2747 return 0;
2748
2749 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002750 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002751
2752 end = block->offset + block->length;
2753
2754 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2755 if (next_block->offset >= end) {
2756 next = MIN(next, next_block->offset);
2757 }
2758 }
2759 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002760 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002761 mingap = next - end;
2762 }
2763 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002764
2765 if (offset == RAM_ADDR_MAX) {
2766 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2767 (uint64_t)size);
2768 abort();
2769 }
2770
Alex Williamson04b16652010-07-02 11:13:17 -06002771 return offset;
2772}
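
/*
 * Worked example (sketch): with existing blocks [0, 1MB) and [3MB, 4MB),
 * the first block's end (1MB) sees next == 3MB, a 2MB gap; the last
 * block's end (4MB) sees next == RAM_ADDR_MAX, an effectively unbounded
 * gap. For find_ram_offset(1MB) the 2MB hole is the smallest gap that
 * fits, so the function returns 1MB, keeping allocations packed.
 */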
2773
2774static ram_addr_t last_ram_offset(void)
2775{
Alex Williamsond17b5282010-06-25 11:08:38 -06002776 RAMBlock *block;
2777 ram_addr_t last = 0;
2778
2779 QLIST_FOREACH(block, &ram_list.blocks, next)
2780 last = MAX(last, block->offset + block->length);
2781
2782 return last;
2783}
2784
Avi Kivityc5705a72011-12-20 15:59:12 +02002785void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002786{
2787 RAMBlock *new_block, *block;
2788
Avi Kivityc5705a72011-12-20 15:59:12 +02002789 new_block = NULL;
2790 QLIST_FOREACH(block, &ram_list.blocks, next) {
2791 if (block->offset == addr) {
2792 new_block = block;
2793 break;
2794 }
2795 }
2796 assert(new_block);
2797 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002798
2799 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2800 char *id = dev->parent_bus->info->get_dev_path(dev);
2801 if (id) {
2802 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002803 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002804 }
2805 }
2806 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2807
2808 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002809 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002810 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2811 new_block->idstr);
2812 abort();
2813 }
2814 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002815}
2816
2817ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2818 MemoryRegion *mr)
2819{
2820 RAMBlock *new_block;
2821
2822 size = TARGET_PAGE_ALIGN(size);
2823 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002824
Avi Kivity7c637362011-12-21 13:09:49 +02002825 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002826 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002827 if (host) {
2828 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002829 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002830 } else {
2831 if (mem_path) {
2832#if defined (__linux__) && !defined(TARGET_S390X)
2833 new_block->host = file_ram_alloc(new_block, size, mem_path);
2834 if (!new_block->host) {
2835 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002836 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002837 }
2838#else
2839 fprintf(stderr, "-mem-path option unsupported\n");
2840 exit(1);
2841#endif
2842 } else {
2843#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002844 /* S390 KVM requires the topmost vma of the RAM to be smaller than
 2845 a system-defined value, which is at least 256GB. Larger systems
 2846 have larger values. We put the guest between the end of the data
2847 segment (system break) and this value. We use 32GB as a base to
2848 have enough room for the system break to grow. */
2849 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002850 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002851 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002852 if (new_block->host == MAP_FAILED) {
2853 fprintf(stderr, "Allocating RAM failed\n");
2854 abort();
2855 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002856#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002857 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002858 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002859 } else {
2860 new_block->host = qemu_vmalloc(size);
2861 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002862#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002863 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002864 }
2865 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002866 new_block->length = size;
2867
2868 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2869
Anthony Liguori7267c092011-08-20 22:09:37 -05002870 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002871 last_ram_offset() >> TARGET_PAGE_BITS);
2872 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2873 0xff, size >> TARGET_PAGE_BITS);
2874
2875 if (kvm_enabled())
2876 kvm_setup_guest_memory(new_block->host, size);
2877
2878 return new_block->offset;
2879}
2880
Avi Kivityc5705a72011-12-20 15:59:12 +02002881ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002882{
Avi Kivityc5705a72011-12-20 15:59:12 +02002883 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002884}
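
/*
 * Typical call sequence from device code (a sketch; the names and the
 * "vga.vram" idstr are hypothetical): allocate backing RAM for a
 * MemoryRegion, then give the block a stable name for migration:
 *
 *     ram_addr_t offset = qemu_ram_alloc(vram_size, &s->vram);
 *     qemu_ram_set_idstr(offset, "vga.vram", NULL);
 *
 * Passing a DeviceState instead of NULL prefixes the idstr with the
 * device's bus path, keeping multiple instances distinct.
 */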
bellarde9a1ab12007-02-08 23:08:38 +00002885
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002886void qemu_ram_free_from_ptr(ram_addr_t addr)
2887{
2888 RAMBlock *block;
2889
2890 QLIST_FOREACH(block, &ram_list.blocks, next) {
2891 if (addr == block->offset) {
2892 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002893 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002894 return;
2895 }
2896 }
2897}
2898
Anthony Liguoric227f092009-10-01 16:12:16 -05002899void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002900{
Alex Williamson04b16652010-07-02 11:13:17 -06002901 RAMBlock *block;
2902
2903 QLIST_FOREACH(block, &ram_list.blocks, next) {
2904 if (addr == block->offset) {
2905 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002906 if (block->flags & RAM_PREALLOC_MASK) {
2907 ;
2908 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002909#if defined (__linux__) && !defined(TARGET_S390X)
2910 if (block->fd) {
2911 munmap(block->host, block->length);
2912 close(block->fd);
2913 } else {
2914 qemu_vfree(block->host);
2915 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002916#else
2917 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002918#endif
2919 } else {
2920#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2921 munmap(block->host, block->length);
2922#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002923 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002924 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002925 } else {
2926 qemu_vfree(block->host);
2927 }
Alex Williamson04b16652010-07-02 11:13:17 -06002928#endif
2929 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002930 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002931 return;
2932 }
2933 }
2934
bellarde9a1ab12007-02-08 23:08:38 +00002935}
2936
Huang Yingcd19cfa2011-03-02 08:56:19 +01002937#ifndef _WIN32
2938void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2939{
2940 RAMBlock *block;
2941 ram_addr_t offset;
2942 int flags;
2943 void *area, *vaddr;
2944
2945 QLIST_FOREACH(block, &ram_list.blocks, next) {
2946 offset = addr - block->offset;
2947 if (offset < block->length) {
2948 vaddr = block->host + offset;
2949 if (block->flags & RAM_PREALLOC_MASK) {
2950 ;
2951 } else {
2952 flags = MAP_FIXED;
2953 munmap(vaddr, length);
2954 if (mem_path) {
2955#if defined(__linux__) && !defined(TARGET_S390X)
2956 if (block->fd) {
2957#ifdef MAP_POPULATE
2958 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2959 MAP_PRIVATE;
2960#else
2961 flags |= MAP_PRIVATE;
2962#endif
2963 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2964 flags, block->fd, offset);
2965 } else {
2966 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2967 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2968 flags, -1, 0);
2969 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002970#else
2971 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01002972#endif
2973 } else {
2974#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2975 flags |= MAP_SHARED | MAP_ANONYMOUS;
2976 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2977 flags, -1, 0);
2978#else
2979 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2980 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2981 flags, -1, 0);
2982#endif
2983 }
2984 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002985 fprintf(stderr, "Could not remap addr: "
2986 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01002987 length, addr);
2988 exit(1);
2989 }
2990 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2991 }
2992 return;
2993 }
2994 }
2995}
2996#endif /* !_WIN32 */
2997
pbrookdc828ca2009-04-09 22:21:07 +00002998/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002999 With the exception of the softmmu code in this file, this should
3000 only be used for local memory (e.g. video ram) that the device owns,
3001 and knows it isn't going to access beyond the end of the block.
3002
3003 It should not be used for general purpose DMA.
3004 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3005 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003006void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00003007{
pbrook94a6b542009-04-11 17:15:54 +00003008 RAMBlock *block;
3009
Alex Williamsonf471a172010-06-11 11:11:42 -06003010 QLIST_FOREACH(block, &ram_list.blocks, next) {
3011 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05003012 /* Move this entry to the start of the list. */
3013 if (block != QLIST_FIRST(&ram_list.blocks)) {
3014 QLIST_REMOVE(block, next);
3015 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3016 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003017 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003018 /* We need to check whether the requested address is in RAM
 3019 * because we don't want to map the entire guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003020 * In that case, just map up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003021 */
3022 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003023 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003024 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003025 block->host =
3026 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003027 }
3028 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003029 return block->host + (addr - block->offset);
3030 }
pbrook94a6b542009-04-11 17:15:54 +00003031 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003032
3033 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3034 abort();
3035
3036 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003037}
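
/*
 * Sketch of intended use (names hypothetical): a display device that
 * owns its VRAM block may touch it directly,
 *
 *     uint8_t *vram = qemu_get_ram_ptr(s->vram_offset);
 *     memset(vram, 0, s->vram_size);
 *
 * but accesses that behave like DMA should go through
 * cpu_physical_memory_map()/cpu_physical_memory_rw() instead.
 */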
3038
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003039/* Return a host pointer to ram allocated with qemu_ram_alloc.
 3040 * Same as qemu_get_ram_ptr but avoids reordering ramblocks.
3041 */
3042void *qemu_safe_ram_ptr(ram_addr_t addr)
3043{
3044 RAMBlock *block;
3045
3046 QLIST_FOREACH(block, &ram_list.blocks, next) {
3047 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003048 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003049 /* We need to check whether the requested address is in RAM
 3050 * because we don't want to map the entire guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003051 * In that case, just map up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003052 */
3053 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003054 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003055 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003056 block->host =
3057 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003058 }
3059 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003060 return block->host + (addr - block->offset);
3061 }
3062 }
3063
3064 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3065 abort();
3066
3067 return NULL;
3068}
3069
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003070/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3071 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003072void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003073{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003074 if (*size == 0) {
3075 return NULL;
3076 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003077 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003078 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003079 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003080 RAMBlock *block;
3081
3082 QLIST_FOREACH(block, &ram_list.blocks, next) {
3083 if (addr - block->offset < block->length) {
3084 if (addr - block->offset + *size > block->length)
3085 *size = block->length - addr + block->offset;
3086 return block->host + (addr - block->offset);
3087 }
3088 }
3089
3090 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3091 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003092 }
3093}
3094
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003095void qemu_put_ram_ptr(void *addr)
3096{
3097 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003098}
3099
Marcelo Tosattie8902612010-10-11 15:31:19 -03003100int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003101{
pbrook94a6b542009-04-11 17:15:54 +00003102 RAMBlock *block;
3103 uint8_t *host = ptr;
3104
Jan Kiszka868bb332011-06-21 22:59:09 +02003105 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003106 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003107 return 0;
3108 }
3109
Alex Williamsonf471a172010-06-11 11:11:42 -06003110 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003111 /* This case occurs when the block is not mapped. */
3112 if (block->host == NULL) {
3113 continue;
3114 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003115 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003116 *ram_addr = block->offset + (host - block->host);
3117 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003118 }
pbrook94a6b542009-04-11 17:15:54 +00003119 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003120
Marcelo Tosattie8902612010-10-11 15:31:19 -03003121 return -1;
3122}
Alex Williamsonf471a172010-06-11 11:11:42 -06003123
Marcelo Tosattie8902612010-10-11 15:31:19 -03003124/* Some of the softmmu routines need to translate from a host pointer
3125 (typically a TLB entry) back to a ram offset. */
3126ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3127{
3128 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003129
Marcelo Tosattie8902612010-10-11 15:31:19 -03003130 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3131 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3132 abort();
3133 }
3134 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003135}
3136
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003137static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3138 unsigned size)
bellard33417e72003-08-10 21:47:01 +00003139{
pbrook67d3b952006-12-18 05:03:52 +00003140#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003141 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003142#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003143#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003144 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003145#endif
3146 return 0;
3147}
3148
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003149static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3150 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00003151{
3152#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003153 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00003154#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003155#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003156 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00003157#endif
3158}
3159
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003160static const MemoryRegionOps unassigned_mem_ops = {
3161 .read = unassigned_mem_read,
3162 .write = unassigned_mem_write,
3163 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003164};
3165
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003166static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3167 unsigned size)
3168{
3169 abort();
3170}
3171
3172static void error_mem_write(void *opaque, target_phys_addr_t addr,
3173 uint64_t value, unsigned size)
3174{
3175 abort();
3176}
3177
3178static const MemoryRegionOps error_mem_ops = {
3179 .read = error_mem_read,
3180 .write = error_mem_write,
3181 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00003182};
3183
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003184static const MemoryRegionOps rom_mem_ops = {
3185 .read = error_mem_read,
3186 .write = unassigned_mem_write,
3187 .endianness = DEVICE_NATIVE_ENDIAN,
3188};
3189
3190static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3191 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00003192{
bellard3a7d9292005-08-21 09:26:42 +00003193 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003194 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003195 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3196#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003197 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003198 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003199#endif
3200 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003201 switch (size) {
3202 case 1:
3203 stb_p(qemu_get_ram_ptr(ram_addr), val);
3204 break;
3205 case 2:
3206 stw_p(qemu_get_ram_ptr(ram_addr), val);
3207 break;
3208 case 4:
3209 stl_p(qemu_get_ram_ptr(ram_addr), val);
3210 break;
3211 default:
3212 abort();
3213 }
bellardf23db162005-08-21 19:12:28 +00003214 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003215 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003216 /* we remove the notdirty callback only if the code has been
3217 flushed */
3218 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003219 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003220}
3221
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003222static const MemoryRegionOps notdirty_mem_ops = {
3223 .read = error_mem_read,
3224 .write = notdirty_mem_write,
3225 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00003226};
3227
pbrook0f459d12008-06-09 00:20:13 +00003228/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003229static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003230{
3231 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003232 target_ulong pc, cs_base;
3233 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003234 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003235 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003236 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003237
aliguori06d55cc2008-11-18 20:24:06 +00003238 if (env->watchpoint_hit) {
3239 /* We re-entered the check after replacing the TB. Now raise
 3240 * the debug interrupt so that it will trigger after the
3241 * current instruction. */
3242 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3243 return;
3244 }
pbrook2e70f6e2008-06-29 01:03:05 +00003245 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003246 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003247 if ((vaddr == (wp->vaddr & len_mask) ||
3248 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003249 wp->flags |= BP_WATCHPOINT_HIT;
3250 if (!env->watchpoint_hit) {
3251 env->watchpoint_hit = wp;
3252 tb = tb_find_pc(env->mem_io_pc);
3253 if (!tb) {
3254 cpu_abort(env, "check_watchpoint: could not find TB for "
3255 "pc=%p", (void *)env->mem_io_pc);
3256 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003257 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003258 tb_phys_invalidate(tb, -1);
3259 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3260 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04003261 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00003262 } else {
3263 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3264 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04003265 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00003266 }
aliguori06d55cc2008-11-18 20:24:06 +00003267 }
aliguori6e140f22008-11-18 20:37:55 +00003268 } else {
3269 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003270 }
3271 }
3272}
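
/*
 * Example (sketch): a 4-byte watchpoint at vaddr 0x1000 is stored with
 * len_mask ~3. A 1-byte store to 0x1002 arrives here with an access
 * len_mask of ~0: the first clause fails (0x1002 != 0x1000), but the
 * second, (0x1002 & ~3) == 0x1000, matches, so the watchpoint fires
 * even though the access is narrower than the watched range.
 */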
3273
pbrook6658ffb2007-03-16 23:58:11 +00003274/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3275 so these check for a hit then pass through to the normal out-of-line
3276 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02003277static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3278 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003279{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003280 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3281 switch (size) {
3282 case 1: return ldub_phys(addr);
3283 case 2: return lduw_phys(addr);
3284 case 4: return ldl_phys(addr);
3285 default: abort();
3286 }
pbrook6658ffb2007-03-16 23:58:11 +00003287}
3288
Avi Kivity1ec9b902012-01-02 12:47:48 +02003289static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3290 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003291{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003292 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3293 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04003294 case 1:
3295 stb_phys(addr, val);
3296 break;
3297 case 2:
3298 stw_phys(addr, val);
3299 break;
3300 case 4:
3301 stl_phys(addr, val);
3302 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02003303 default: abort();
3304 }
pbrook6658ffb2007-03-16 23:58:11 +00003305}
3306
Avi Kivity1ec9b902012-01-02 12:47:48 +02003307static const MemoryRegionOps watch_mem_ops = {
3308 .read = watch_mem_read,
3309 .write = watch_mem_write,
3310 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003311};
pbrook6658ffb2007-03-16 23:58:11 +00003312
Avi Kivity70c68e42012-01-02 12:32:48 +02003313static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3314 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003315{
Avi Kivity70c68e42012-01-02 12:32:48 +02003316 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003317 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003318#if defined(DEBUG_SUBPAGE)
3319 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3320 mmio, len, addr, idx);
3321#endif
blueswir1db7b5422007-05-26 17:36:03 +00003322
Richard Hendersonf6405242010-04-22 16:47:31 -07003323 addr += mmio->region_offset[idx];
3324 idx = mmio->sub_io_index[idx];
Avi Kivity70c68e42012-01-02 12:32:48 +02003325 return io_mem_read(idx, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003326}
3327
Avi Kivity70c68e42012-01-02 12:32:48 +02003328static void subpage_write(void *opaque, target_phys_addr_t addr,
3329 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003330{
Avi Kivity70c68e42012-01-02 12:32:48 +02003331 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003332 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003333#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003334 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3335 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003336 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003337#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003338
3339 addr += mmio->region_offset[idx];
3340 idx = mmio->sub_io_index[idx];
Avi Kivity70c68e42012-01-02 12:32:48 +02003341 io_mem_write(idx, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003342}
3343
Avi Kivity70c68e42012-01-02 12:32:48 +02003344static const MemoryRegionOps subpage_ops = {
3345 .read = subpage_read,
3346 .write = subpage_write,
3347 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003348};
3349
Avi Kivityde712f92012-01-02 12:41:07 +02003350static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3351 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003352{
3353 ram_addr_t raddr = addr;
3354 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003355 switch (size) {
3356 case 1: return ldub_p(ptr);
3357 case 2: return lduw_p(ptr);
3358 case 4: return ldl_p(ptr);
3359 default: abort();
3360 }
Andreas Färber56384e82011-11-30 16:26:21 +01003361}
3362
Avi Kivityde712f92012-01-02 12:41:07 +02003363static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3364 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003365{
3366 ram_addr_t raddr = addr;
3367 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003368 switch (size) {
3369 case 1: return stb_p(ptr, value);
3370 case 2: return stw_p(ptr, value);
3371 case 4: return stl_p(ptr, value);
3372 default: abort();
3373 }
Andreas Färber56384e82011-11-30 16:26:21 +01003374}
3375
Avi Kivityde712f92012-01-02 12:41:07 +02003376static const MemoryRegionOps subpage_ram_ops = {
3377 .read = subpage_ram_read,
3378 .write = subpage_ram_write,
3379 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003380};
3381
Anthony Liguoric227f092009-10-01 16:12:16 -05003382static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3383 ram_addr_t memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003384{
3385 int idx, eidx;
3386
3387 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3388 return -1;
3389 idx = SUBPAGE_IDX(start);
3390 eidx = SUBPAGE_IDX(end);
3391#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003392 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003393 mmio, start, end, idx, eidx, memory);
3394#endif
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003395 if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
Avi Kivityde712f92012-01-02 12:41:07 +02003396 memory = io_mem_subpage_ram.ram_addr;
Andreas Färber56384e82011-11-30 16:26:21 +01003397 }
Avi Kivity11c7ef02012-01-02 17:21:07 +02003398 memory &= IO_MEM_NB_ENTRIES - 1;
blueswir1db7b5422007-05-26 17:36:03 +00003399 for (; idx <= eidx; idx++) {
Richard Hendersonf6405242010-04-22 16:47:31 -07003400 mmio->sub_io_index[idx] = memory;
3401 mmio->region_offset[idx] = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00003402 }
3403
3404 return 0;
3405}
3406
Richard Hendersonf6405242010-04-22 16:47:31 -07003407static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3408 ram_addr_t orig_memory,
3409 ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003410{
Anthony Liguoric227f092009-10-01 16:12:16 -05003411 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003412 int subpage_memory;
3413
Anthony Liguori7267c092011-08-20 22:09:37 -05003414 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003415
3416 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003417 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3418 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003419 mmio->iomem.subpage = true;
Avi Kivity70c68e42012-01-02 12:32:48 +02003420 subpage_memory = mmio->iomem.ram_addr;
blueswir1db7b5422007-05-26 17:36:03 +00003421#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003422 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3423 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
blueswir1db7b5422007-05-26 17:36:03 +00003424#endif
Avi Kivityb3b00c72012-01-02 13:20:11 +02003425 *phys = subpage_memory;
Richard Hendersonf6405242010-04-22 16:47:31 -07003426 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00003427
3428 return mmio;
3429}
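
/*
 * Worked example (sketch; "dev_io_index" is a hypothetical io table
 * index): suppose a 4KB page is RAM except for a device register window
 * at offsets 0x800..0x80f. subpage_init() first points every offset at
 * orig_memory; a later subpage_register(mmio, 0x800, 0x80f,
 * dev_io_index, 0) then redirects just those offsets, so accesses below
 * 0x800 still reach the original backing while the window goes to the
 * device.
 */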
3430
aliguori88715652009-02-11 15:20:58 +00003431static int get_free_io_mem_idx(void)
3432{
3433 int i;
3434
3435 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3436 if (!io_mem_used[i]) {
3437 io_mem_used[i] = 1;
3438 return i;
3439 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003440 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003441 return -1;
3442}
3443
bellard33417e72003-08-10 21:47:01 +00003444/* Register a MemoryRegion as an I/O memory zone: its read and write
 3445 callbacks handle the byte, word and dword accesses.
 3446 If io_index is non-zero, the corresponding io zone is
 3447 modified. If it is zero, a new io zone is allocated. The return
 3448 value can be used with cpu_register_physical_memory(). (-1) is
 3449 returned on error. */
Avi Kivitya621f382012-01-02 13:12:08 +02003451static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
bellard33417e72003-08-10 21:47:01 +00003452{
bellard33417e72003-08-10 21:47:01 +00003453 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003454 io_index = get_free_io_mem_idx();
3455 if (io_index == -1)
3456 return io_index;
bellard33417e72003-08-10 21:47:01 +00003457 } else {
3458 if (io_index >= IO_MEM_NB_ENTRIES)
3459 return -1;
3460 }
bellardb5ff1b32005-11-26 10:38:39 +00003461
Avi Kivitya621f382012-01-02 13:12:08 +02003462 io_mem_region[io_index] = mr;
Richard Hendersonf6405242010-04-22 16:47:31 -07003463
Avi Kivity11c7ef02012-01-02 17:21:07 +02003464 return io_index;
bellard33417e72003-08-10 21:47:01 +00003465}
bellard61382a52003-10-27 21:22:23 +00003466
Avi Kivitya621f382012-01-02 13:12:08 +02003467int cpu_register_io_memory(MemoryRegion *mr)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003468{
Avi Kivitya621f382012-01-02 13:12:08 +02003469 return cpu_register_io_memory_fixed(0, mr);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003470}
3471
Avi Kivity11c7ef02012-01-02 17:21:07 +02003472void cpu_unregister_io_memory(int io_index)
aliguori88715652009-02-11 15:20:58 +00003473{
Avi Kivitya621f382012-01-02 13:12:08 +02003474 io_mem_region[io_index] = NULL;
aliguori88715652009-02-11 15:20:58 +00003475 io_mem_used[io_index] = 0;
3476}
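
/*
 * Sketch of the expected pairing (names hypothetical): after
 * memory_region_init_io(), a caller can claim an I/O slot for the
 * region and later release it:
 *
 *     int idx = cpu_register_io_memory(&dev->iomem);
 *     if (idx < 0) {
 *         ... all IO_MEM_NB_ENTRIES slots are in use ...
 *     }
 *     ...
 *     cpu_unregister_io_memory(idx);
 */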
3477
Avi Kivitye9179ce2009-06-14 11:38:52 +03003478static void io_mem_init(void)
3479{
3480 int i;
3481
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003482 /* Must be first: */
3483 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3484 assert(io_mem_ram.ram_addr == 0);
3485 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3486 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3487 "unassigned", UINT64_MAX);
3488 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3489 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003490 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3491 "subpage-ram", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003492 for (i=0; i<5; i++)
3493 io_mem_used[i] = 1;
3494
Avi Kivity1ec9b902012-01-02 12:47:48 +02003495 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3496 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003497}
3498
Avi Kivity62152b82011-07-26 14:26:14 +03003499static void memory_map_init(void)
3500{
Anthony Liguori7267c092011-08-20 22:09:37 -05003501 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003502 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003503 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003504
Anthony Liguori7267c092011-08-20 22:09:37 -05003505 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003506 memory_region_init(system_io, "io", 65536);
3507 set_system_io_map(system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003508}
3509
3510MemoryRegion *get_system_memory(void)
3511{
3512 return system_memory;
3513}
3514
Avi Kivity309cb472011-08-08 16:09:03 +03003515MemoryRegion *get_system_io(void)
3516{
3517 return system_io;
3518}
3519
pbrooke2eef172008-06-08 01:09:01 +00003520#endif /* !defined(CONFIG_USER_ONLY) */
3521
bellard13eb76e2004-01-24 15:23:36 +00003522/* physical memory access (slow version, mainly for debug) */
3523#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003524int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3525 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003526{
3527 int l, flags;
3528 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003529 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003530
3531 while (len > 0) {
3532 page = addr & TARGET_PAGE_MASK;
3533 l = (page + TARGET_PAGE_SIZE) - addr;
3534 if (l > len)
3535 l = len;
3536 flags = page_get_flags(page);
3537 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003538 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003539 if (is_write) {
3540 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003541 return -1;
bellard579a97f2007-11-11 14:26:47 +00003542 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003543 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003544 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003545 memcpy(p, buf, l);
3546 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003547 } else {
3548 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003549 return -1;
bellard579a97f2007-11-11 14:26:47 +00003550 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003551 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003552 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003553 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003554 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003555 }
3556 len -= l;
3557 buf += l;
3558 addr += l;
3559 }
Paul Brooka68fe892010-03-01 00:08:59 +00003560 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003561}
bellard8df1cd02005-01-28 22:37:22 +00003562
bellard13eb76e2004-01-24 15:23:36 +00003563#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003564void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003565 int len, int is_write)
3566{
3567 int l, io_index;
3568 uint8_t *ptr;
3569 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003570 target_phys_addr_t page;
Anthony PERARD8ca56922011-07-15 04:32:53 +00003571 ram_addr_t pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003572 PhysPageDesc p;
ths3b46e622007-09-17 08:09:54 +00003573
bellard13eb76e2004-01-24 15:23:36 +00003574 while (len > 0) {
3575 page = addr & TARGET_PAGE_MASK;
3576 l = (page + TARGET_PAGE_SIZE) - addr;
3577 if (l > len)
3578 l = len;
bellard92e873b2004-05-21 14:52:29 +00003579 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003580 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003581
bellard13eb76e2004-01-24 15:23:36 +00003582 if (is_write) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003583 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003584 target_phys_addr_t addr1;
Avi Kivity11c7ef02012-01-02 17:21:07 +02003585 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003586 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
bellard6a00d602005-11-21 23:25:50 +00003587 /* XXX: could force cpu_single_env to NULL to avoid
3588 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003589 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003590 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003591 val = ldl_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003592 io_mem_write(io_index, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003593 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003594 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003595 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003596 val = lduw_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003597 io_mem_write(io_index, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003598 l = 2;
3599 } else {
bellard1c213d12005-09-03 10:49:04 +00003600 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003601 val = ldub_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003602 io_mem_write(io_index, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003603 l = 1;
3604 }
3605 } else {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003606 ram_addr_t addr1;
bellardb448f2f2004-02-25 23:24:04 +00003607 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003608 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003609 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003610 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003611 if (!cpu_physical_memory_is_dirty(addr1)) {
3612 /* invalidate code */
3613 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3614 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003615 cpu_physical_memory_set_dirty_flags(
3616 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003617 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003618 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003619 }
3620 } else {
Avi Kivity1d393fa2012-01-01 21:15:42 +02003621 if (!is_ram_rom_romd(pd)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003622 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003623 /* I/O case */
Avi Kivity11c7ef02012-01-02 17:21:07 +02003624 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003625 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
aurel326c2934d2009-02-18 21:37:17 +00003626 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003627 /* 32 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003628 val = io_mem_read(io_index, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003629 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003630 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003631 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003632 /* 16 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003633 val = io_mem_read(io_index, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003634 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003635 l = 2;
3636 } else {
bellard1c213d12005-09-03 10:49:04 +00003637 /* 8 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003638 val = io_mem_read(io_index, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003639 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003640 l = 1;
3641 }
3642 } else {
3643 /* RAM case */
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003644 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3645 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3646 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003647 }
3648 }
3649 len -= l;
3650 buf += l;
3651 addr += l;
3652 }
3653}
bellard8df1cd02005-01-28 22:37:22 +00003654
bellardd0ecd2a2006-04-23 17:14:48 +00003655/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003656void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003657 const uint8_t *buf, int len)
3658{
3659 int l;
3660 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003661 target_phys_addr_t page;
bellardd0ecd2a2006-04-23 17:14:48 +00003662 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003663 PhysPageDesc p;
ths3b46e622007-09-17 08:09:54 +00003664
bellardd0ecd2a2006-04-23 17:14:48 +00003665 while (len > 0) {
3666 page = addr & TARGET_PAGE_MASK;
3667 l = (page + TARGET_PAGE_SIZE) - addr;
3668 if (l > len)
3669 l = len;
3670 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003671 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003672
Avi Kivity1d393fa2012-01-01 21:15:42 +02003673 if (!is_ram_rom_romd(pd)) {
bellardd0ecd2a2006-04-23 17:14:48 +00003674 /* do nothing */
3675 } else {
3676 unsigned long addr1;
3677 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3678 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003679 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003680 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003681 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003682 }
3683 len -= l;
3684 buf += l;
3685 addr += l;
3686 }
3687}
3688
aliguori6d16c2f2009-01-22 16:59:11 +00003689typedef struct {
3690 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003691 target_phys_addr_t addr;
3692 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003693} BounceBuffer;
3694
3695static BounceBuffer bounce;
3696
aliguoriba223c22009-01-22 16:59:16 +00003697typedef struct MapClient {
3698 void *opaque;
3699 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003700 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003701} MapClient;
3702
Blue Swirl72cf2d42009-09-12 07:36:22 +00003703static QLIST_HEAD(map_client_list, MapClient) map_client_list
3704 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003705
3706void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3707{
Anthony Liguori7267c092011-08-20 22:09:37 -05003708 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003709
3710 client->opaque = opaque;
3711 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003712 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003713 return client;
3714}
3715
3716void cpu_unregister_map_client(void *_client)
3717{
3718 MapClient *client = (MapClient *)_client;
3719
Blue Swirl72cf2d42009-09-12 07:36:22 +00003720 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003721 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003722}
3723
3724static void cpu_notify_map_clients(void)
3725{
3726 MapClient *client;
3727
Blue Swirl72cf2d42009-09-12 07:36:22 +00003728 while (!QLIST_EMPTY(&map_client_list)) {
3729 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003730 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003731 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003732 }
3733}
3734
aliguori6d16c2f2009-01-22 16:59:11 +00003735/* Map a physical memory region into the host's virtual address space.
3736 * May map a subset of the requested range, given by and returned in *plen.
3737 * May return NULL if resources needed to perform the mapping are exhausted.
3738 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003739 * Use cpu_register_map_client() to know when retrying the map operation is
3740 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003741 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003742void *cpu_physical_memory_map(target_phys_addr_t addr,
3743 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003744 int is_write)
3745{
Anthony Liguoric227f092009-10-01 16:12:16 -05003746 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003747 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003748 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003749 target_phys_addr_t page;
aliguori6d16c2f2009-01-22 16:59:11 +00003750 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003751 PhysPageDesc p;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003752 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003753 ram_addr_t rlen;
3754 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003755
3756 while (len > 0) {
3757 page = addr & TARGET_PAGE_MASK;
3758 l = (page + TARGET_PAGE_SIZE) - addr;
3759 if (l > len)
3760 l = len;
3761 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003762 pd = p.phys_offset;
aliguori6d16c2f2009-01-22 16:59:11 +00003763
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003764 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003765 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003766 break;
3767 }
3768 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3769 bounce.addr = addr;
3770 bounce.len = l;
3771 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02003772 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003773 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003774
3775 *plen = l;
3776 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00003777 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003778 if (!todo) {
3779 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3780 }
aliguori6d16c2f2009-01-22 16:59:11 +00003781
3782 len -= l;
3783 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003784 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00003785 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003786 rlen = todo;
3787 ret = qemu_ram_ptr_length(raddr, &rlen);
3788 *plen = rlen;
3789 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003790}
3791
3792/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3793 * Will also mark the memory as dirty if is_write == 1. access_len gives
3794 * the amount of memory that was actually read or written by the caller.
3795 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003796void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3797 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003798{
3799 if (buffer != bounce.buffer) {
3800 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003801 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003802 while (access_len) {
3803 unsigned l;
3804 l = TARGET_PAGE_SIZE;
3805 if (l > access_len)
3806 l = access_len;
3807 if (!cpu_physical_memory_is_dirty(addr1)) {
3808 /* invalidate code */
3809 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3810 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003811 cpu_physical_memory_set_dirty_flags(
3812 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori6d16c2f2009-01-22 16:59:11 +00003813 }
3814 addr1 += l;
3815 access_len -= l;
3816 }
3817 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003818 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003819 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003820 }
aliguori6d16c2f2009-01-22 16:59:11 +00003821 return;
3822 }
3823 if (is_write) {
3824 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3825 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003826 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003827 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00003828 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003829}
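/* Illustrative sketch, not part of the original file: the basic calling
   pattern for the map/unmap pair above.  The mapping may come back
   shorter than requested or fail outright, so callers loop and fall back
   to the copying path.  example_dma_read() is a hypothetical helper. */
#if 0
static void example_dma_read(target_phys_addr_t addr, uint8_t *buf,
                             target_phys_addr_t size)
{
    while (size > 0) {
        target_phys_addr_t plen = size;
        void *host = cpu_physical_memory_map(addr, &plen, 0);

        if (host) {
            memcpy(buf, host, plen);
            /* access_len == plen: everything mapped was consumed */
            cpu_physical_memory_unmap(host, plen, 0, plen);
        } else {
            /* Bounce buffer busy: copy a single page the slow way */
            plen = size < TARGET_PAGE_SIZE ? size : TARGET_PAGE_SIZE;
            cpu_physical_memory_read(addr, buf, plen);
        }
        addr += plen;
        buf += plen;
        size -= plen;
    }
}
#endif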
bellardd0ecd2a2006-04-23 17:14:48 +00003830
bellard8df1cd02005-01-28 22:37:22 +00003831/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003832static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3833 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003834{
3835 int io_index;
3836 uint8_t *ptr;
3837 uint32_t val;
3838 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003839 PhysPageDesc p;
bellard8df1cd02005-01-28 22:37:22 +00003840
3841 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003842 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003843
Avi Kivity1d393fa2012-01-01 21:15:42 +02003844 if (!is_ram_rom_romd(pd)) {
bellard8df1cd02005-01-28 22:37:22 +00003845 /* I/O case */
Avi Kivity11c7ef02012-01-02 17:21:07 +02003846 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003847 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Avi Kivityacbbec52011-11-21 12:27:03 +02003848 val = io_mem_read(io_index, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003849#if defined(TARGET_WORDS_BIGENDIAN)
3850 if (endian == DEVICE_LITTLE_ENDIAN) {
3851 val = bswap32(val);
3852 }
3853#else
3854 if (endian == DEVICE_BIG_ENDIAN) {
3855 val = bswap32(val);
3856 }
3857#endif
bellard8df1cd02005-01-28 22:37:22 +00003858 } else {
3859 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003860 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard8df1cd02005-01-28 22:37:22 +00003861 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003862 switch (endian) {
3863 case DEVICE_LITTLE_ENDIAN:
3864 val = ldl_le_p(ptr);
3865 break;
3866 case DEVICE_BIG_ENDIAN:
3867 val = ldl_be_p(ptr);
3868 break;
3869 default:
3870 val = ldl_p(ptr);
3871 break;
3872 }
bellard8df1cd02005-01-28 22:37:22 +00003873 }
3874 return val;
3875}
3876
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003877uint32_t ldl_phys(target_phys_addr_t addr)
3878{
3879 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3880}
3881
3882uint32_t ldl_le_phys(target_phys_addr_t addr)
3883{
3884 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3885}
3886
3887uint32_t ldl_be_phys(target_phys_addr_t addr)
3888{
3889 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3890}
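/* Illustrative sketch, not part of the original file: the explicit endian
   variants let device models read registers whose byte order is fixed by
   a hardware spec.  ldl_le_phys() returns the same value on big- and
   little-endian targets, while ldl_phys() follows the target's native
   order.  reg_pa is a hypothetical register address. */
#if 0
static uint32_t example_read_le_reg(target_phys_addr_t reg_pa)
{
    return ldl_le_phys(reg_pa);    /* swapped as needed for this target */
}
#endif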
3891
bellard84b7b8e2005-11-28 21:19:04 +00003892/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003893static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3894 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003895{
3896 int io_index;
3897 uint8_t *ptr;
3898 uint64_t val;
3899 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003900 PhysPageDesc p;
bellard84b7b8e2005-11-28 21:19:04 +00003901
3902 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003903 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003904
Avi Kivity1d393fa2012-01-01 21:15:42 +02003905 if (!is_ram_rom_romd(pd)) {
bellard84b7b8e2005-11-28 21:19:04 +00003906 /* I/O case */
Avi Kivity11c7ef02012-01-02 17:21:07 +02003907 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003908 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003909
3910        /* XXX This is broken when device endian != cpu endian: this I/O
3911           path ignores "endian". Fix with bswap64() as done for ldl above */
bellard84b7b8e2005-11-28 21:19:04 +00003912#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivityacbbec52011-11-21 12:27:03 +02003913 val = io_mem_read(io_index, addr, 4) << 32;
3914 val |= io_mem_read(io_index, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00003915#else
Avi Kivityacbbec52011-11-21 12:27:03 +02003916 val = io_mem_read(io_index, addr, 4);
3917 val |= io_mem_read(io_index, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00003918#endif
3919 } else {
3920 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003921 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard84b7b8e2005-11-28 21:19:04 +00003922 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003923 switch (endian) {
3924 case DEVICE_LITTLE_ENDIAN:
3925 val = ldq_le_p(ptr);
3926 break;
3927 case DEVICE_BIG_ENDIAN:
3928 val = ldq_be_p(ptr);
3929 break;
3930 default:
3931 val = ldq_p(ptr);
3932 break;
3933 }
bellard84b7b8e2005-11-28 21:19:04 +00003934 }
3935 return val;
3936}
3937
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003938uint64_t ldq_phys(target_phys_addr_t addr)
3939{
3940 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3941}
3942
3943uint64_t ldq_le_phys(target_phys_addr_t addr)
3944{
3945 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3946}
3947
3948uint64_t ldq_be_phys(target_phys_addr_t addr)
3949{
3950 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3951}
3952
bellardaab33092005-10-30 20:48:42 +00003953/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003954uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003955{
3956 uint8_t val;
3957 cpu_physical_memory_read(addr, &val, 1);
3958 return val;
3959}
3960
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003961/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003962static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3963 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003964{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003965 int io_index;
3966 uint8_t *ptr;
3967 uint64_t val;
3968 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003969 PhysPageDesc p;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003970
3971 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003972 pd = p.phys_offset;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003973
Avi Kivity1d393fa2012-01-01 21:15:42 +02003974 if (!is_ram_rom_romd(pd)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003975 /* I/O case */
Avi Kivity11c7ef02012-01-02 17:21:07 +02003976 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003977 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Avi Kivityacbbec52011-11-21 12:27:03 +02003978 val = io_mem_read(io_index, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003979#if defined(TARGET_WORDS_BIGENDIAN)
3980 if (endian == DEVICE_LITTLE_ENDIAN) {
3981 val = bswap16(val);
3982 }
3983#else
3984 if (endian == DEVICE_BIG_ENDIAN) {
3985 val = bswap16(val);
3986 }
3987#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003988 } else {
3989 /* RAM case */
3990 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3991 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003992 switch (endian) {
3993 case DEVICE_LITTLE_ENDIAN:
3994 val = lduw_le_p(ptr);
3995 break;
3996 case DEVICE_BIG_ENDIAN:
3997 val = lduw_be_p(ptr);
3998 break;
3999 default:
4000 val = lduw_p(ptr);
4001 break;
4002 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004003 }
4004 return val;
bellardaab33092005-10-30 20:48:42 +00004005}
4006
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004007uint32_t lduw_phys(target_phys_addr_t addr)
4008{
4009 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4010}
4011
4012uint32_t lduw_le_phys(target_phys_addr_t addr)
4013{
4014 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4015}
4016
4017uint32_t lduw_be_phys(target_phys_addr_t addr)
4018{
4019 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4020}
4021
bellard8df1cd02005-01-28 22:37:22 +00004022/* warning: addr must be aligned. The RAM page is not marked as dirty
4023   and the code inside is not invalidated. This is useful if the dirty
4024   bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05004025void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00004026{
4027 int io_index;
4028 uint8_t *ptr;
4029 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004030 PhysPageDesc p;
bellard8df1cd02005-01-28 22:37:22 +00004031
4032 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004033 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004034
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004035 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivity11c7ef02012-01-02 17:21:07 +02004036 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004037 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Avi Kivityacbbec52011-11-21 12:27:03 +02004038 io_mem_write(io_index, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004039 } else {
aliguori74576192008-10-06 14:02:03 +00004040 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00004041 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00004042 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00004043
4044 if (unlikely(in_migration)) {
4045 if (!cpu_physical_memory_is_dirty(addr1)) {
4046 /* invalidate code */
4047 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4048 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004049 cpu_physical_memory_set_dirty_flags(
4050 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00004051 }
4052 }
bellard8df1cd02005-01-28 22:37:22 +00004053 }
4054}
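/* Illustrative sketch, not part of the original file: the PTE-tracking
   use the comment above mentions.  A target MMU walk can set the accessed
   bit in a guest page table entry with stl_phys_notdirty() so the store
   neither marks the page dirty nor invalidates TBs.  The 0x20 constant
   matches the x86 PG_ACCESSED bit and is only an example. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & 0x20)) {
        stl_phys_notdirty(pte_addr, pte | 0x20);
    }
}
#endif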
4055
Anthony Liguoric227f092009-10-01 16:12:16 -05004056void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00004057{
4058 int io_index;
4059 uint8_t *ptr;
4060 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004061 PhysPageDesc p;
j_mayerbc98a7e2007-04-04 07:55:12 +00004062
4063 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004064 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004065
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004066 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivity11c7ef02012-01-02 17:21:07 +02004067 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004068 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
j_mayerbc98a7e2007-04-04 07:55:12 +00004069#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivityacbbec52011-11-21 12:27:03 +02004070 io_mem_write(io_index, addr, val >> 32, 4);
4071 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004072#else
Avi Kivityacbbec52011-11-21 12:27:03 +02004073 io_mem_write(io_index, addr, (uint32_t)val, 4);
4074 io_mem_write(io_index, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00004075#endif
4076 } else {
pbrook5579c7f2009-04-11 14:47:08 +00004077 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
j_mayerbc98a7e2007-04-04 07:55:12 +00004078 (addr & ~TARGET_PAGE_MASK);
4079 stq_p(ptr, val);
4080 }
4081}
4082
bellard8df1cd02005-01-28 22:37:22 +00004083/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004084static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4085 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004086{
4087 int io_index;
4088 uint8_t *ptr;
4089 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004090 PhysPageDesc p;
bellard8df1cd02005-01-28 22:37:22 +00004091
4092 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004093 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00004094
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004095 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivity11c7ef02012-01-02 17:21:07 +02004096 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004097 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004098#if defined(TARGET_WORDS_BIGENDIAN)
4099 if (endian == DEVICE_LITTLE_ENDIAN) {
4100 val = bswap32(val);
4101 }
4102#else
4103 if (endian == DEVICE_BIG_ENDIAN) {
4104 val = bswap32(val);
4105 }
4106#endif
Avi Kivityacbbec52011-11-21 12:27:03 +02004107 io_mem_write(io_index, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00004108 } else {
4109 unsigned long addr1;
4110 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4111 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004112 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004113 switch (endian) {
4114 case DEVICE_LITTLE_ENDIAN:
4115 stl_le_p(ptr, val);
4116 break;
4117 case DEVICE_BIG_ENDIAN:
4118 stl_be_p(ptr, val);
4119 break;
4120 default:
4121 stl_p(ptr, val);
4122 break;
4123 }
bellard3a7d9292005-08-21 09:26:42 +00004124 if (!cpu_physical_memory_is_dirty(addr1)) {
4125 /* invalidate code */
4126 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4127 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004128 cpu_physical_memory_set_dirty_flags(addr1,
4129 (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00004130 }
bellard8df1cd02005-01-28 22:37:22 +00004131 }
4132}
4133
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004134void stl_phys(target_phys_addr_t addr, uint32_t val)
4135{
4136 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4137}
4138
4139void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4140{
4141 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4142}
4143
4144void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4145{
4146 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4147}
4148
bellardaab33092005-10-30 20:48:42 +00004149/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004150void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004151{
4152 uint8_t v = val;
4153 cpu_physical_memory_write(addr, &v, 1);
4154}
4155
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004156/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004157static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4158 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004159{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004160 int io_index;
4161 uint8_t *ptr;
4162 unsigned long pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004163 PhysPageDesc p;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004164
4165 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004166 pd = p.phys_offset;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004167
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004168 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
Avi Kivity11c7ef02012-01-02 17:21:07 +02004169 io_index = pd & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02004170 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004171#if defined(TARGET_WORDS_BIGENDIAN)
4172 if (endian == DEVICE_LITTLE_ENDIAN) {
4173 val = bswap16(val);
4174 }
4175#else
4176 if (endian == DEVICE_BIG_ENDIAN) {
4177 val = bswap16(val);
4178 }
4179#endif
Avi Kivityacbbec52011-11-21 12:27:03 +02004180 io_mem_write(io_index, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004181 } else {
4182 unsigned long addr1;
4183 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4184 /* RAM case */
4185 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004186 switch (endian) {
4187 case DEVICE_LITTLE_ENDIAN:
4188 stw_le_p(ptr, val);
4189 break;
4190 case DEVICE_BIG_ENDIAN:
4191 stw_be_p(ptr, val);
4192 break;
4193 default:
4194 stw_p(ptr, val);
4195 break;
4196 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004197 if (!cpu_physical_memory_is_dirty(addr1)) {
4198 /* invalidate code */
4199 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4200 /* set dirty bit */
4201 cpu_physical_memory_set_dirty_flags(addr1,
4202 (0xff & ~CODE_DIRTY_FLAG));
4203 }
4204 }
bellardaab33092005-10-30 20:48:42 +00004205}
4206
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004207void stw_phys(target_phys_addr_t addr, uint32_t val)
4208{
4209 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4210}
4211
4212void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4213{
4214 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4215}
4216
4217void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4218{
4219 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4220}
4221
bellardaab33092005-10-30 20:48:42 +00004222/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004223void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004224{
4225 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004226 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004227}
4228
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004229void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4230{
4231 val = cpu_to_le64(val);
4232 cpu_physical_memory_write(addr, &val, 8);
4233}
4234
4235void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4236{
4237 val = cpu_to_be64(val);
4238 cpu_physical_memory_write(addr, &val, 8);
4239}
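/* Illustrative sketch, not part of the original file: the store variants
   mirror the loads.  A 64-bit field that a hardware spec defines as
   little-endian is written with stq_le_phys() so its in-memory layout is
   independent of the target byte order.  desc_pa and buf_pa are
   hypothetical. */
#if 0
static void example_write_le_u64(target_phys_addr_t desc_pa, uint64_t buf_pa)
{
    stq_le_phys(desc_pa, buf_pa);
}
#endif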
4240
aliguori5e2972f2009-03-28 17:51:36 +00004241/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00004242int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004243 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004244{
4245 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004246 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004247 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004248
4249 while (len > 0) {
4250 page = addr & TARGET_PAGE_MASK;
4251 phys_addr = cpu_get_phys_page_debug(env, page);
4252 /* if no physical page mapped, return an error */
4253 if (phys_addr == -1)
4254 return -1;
4255 l = (page + TARGET_PAGE_SIZE) - addr;
4256 if (l > len)
4257 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004258 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004259 if (is_write)
4260 cpu_physical_memory_write_rom(phys_addr, buf, l);
4261 else
aliguori5e2972f2009-03-28 17:51:36 +00004262 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004263 len -= l;
4264 buf += l;
4265 addr += l;
4266 }
4267 return 0;
4268}
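/* Illustrative sketch, not part of the original file: how a debugger
   front end such as the gdb stub reads guest virtual memory with the
   helper above; -1 means some page in the range was unmapped.
   example_peek_u32() is a hypothetical wrapper. */
#if 0
static int example_peek_u32(CPUState *env, target_ulong vaddr, uint32_t *val)
{
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)val, sizeof(*val), 0);
}
#endif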
Paul Brooka68fe892010-03-01 00:08:59 +00004269#endif
bellard13eb76e2004-01-24 15:23:36 +00004270
pbrook2e70f6e2008-06-29 01:03:05 +00004271/* In deterministic (icount) execution mode, instructions doing device I/O
4272   must be at the end of the TB */
4273void cpu_io_recompile(CPUState *env, void *retaddr)
4274{
4275 TranslationBlock *tb;
4276 uint32_t n, cflags;
4277 target_ulong pc, cs_base;
4278 uint64_t flags;
4279
4280 tb = tb_find_pc((unsigned long)retaddr);
4281 if (!tb) {
4282 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4283 retaddr);
4284 }
4285 n = env->icount_decr.u16.low + tb->icount;
Stefan Weil618ba8e2011-04-18 06:39:53 +00004286 cpu_restore_state(tb, env, (unsigned long)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004287 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004288 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004289 n = n - env->icount_decr.u16.low;
4290 /* Generate a new TB ending on the I/O insn. */
4291 n++;
4292 /* On MIPS and SH, delay slot instructions can only be restarted if
4293 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004294 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004295 branch. */
4296#if defined(TARGET_MIPS)
4297 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4298 env->active_tc.PC -= 4;
4299 env->icount_decr.u16.low++;
4300 env->hflags &= ~MIPS_HFLAG_BMASK;
4301 }
4302#elif defined(TARGET_SH4)
4303 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4304 && n > 1) {
4305 env->pc -= 2;
4306 env->icount_decr.u16.low++;
4307 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4308 }
4309#endif
4310 /* This should never happen. */
4311 if (n > CF_COUNT_MASK)
4312 cpu_abort(env, "TB too big during recompile");
4313
4314 cflags = n | CF_LAST_IO;
4315 pc = tb->pc;
4316 cs_base = tb->cs_base;
4317 flags = tb->flags;
4318 tb_phys_invalidate(tb, -1);
4319 /* FIXME: In theory this could raise an exception. In practice
4320 we have already translated the block once so it's probably ok. */
4321 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004322 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004323 the first in the TB) then we end up generating a whole new TB and
4324 repeating the fault, which is horribly inefficient.
4325 Better would be to execute just this insn uncached, or generate a
4326 second new TB. */
4327 cpu_resume_from_signal(env, NULL);
4328}
4329
Paul Brookb3755a92010-03-12 16:54:58 +00004330#if !defined(CONFIG_USER_ONLY)
4331
Stefan Weil055403b2010-10-22 23:03:32 +02004332void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004333{
4334 int i, target_code_size, max_target_code_size;
4335 int direct_jmp_count, direct_jmp2_count, cross_page;
4336 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004337
bellarde3db7222005-01-26 22:00:47 +00004338 target_code_size = 0;
4339 max_target_code_size = 0;
4340 cross_page = 0;
4341 direct_jmp_count = 0;
4342 direct_jmp2_count = 0;
4343 for(i = 0; i < nb_tbs; i++) {
4344 tb = &tbs[i];
4345 target_code_size += tb->size;
4346 if (tb->size > max_target_code_size)
4347 max_target_code_size = tb->size;
4348 if (tb->page_addr[1] != -1)
4349 cross_page++;
4350 if (tb->tb_next_offset[0] != 0xffff) {
4351 direct_jmp_count++;
4352 if (tb->tb_next_offset[1] != 0xffff) {
4353 direct_jmp2_count++;
4354 }
4355 }
4356 }
4357 /* XXX: avoid using doubles ? */
bellard57fec1f2008-02-01 10:50:11 +00004358 cpu_fprintf(f, "Translation buffer state:\n");
Stefan Weil055403b2010-10-22 23:03:32 +02004359 cpu_fprintf(f, "gen code size %td/%ld\n",
bellard26a5f132008-05-28 12:30:31 +00004360 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4361 cpu_fprintf(f, "TB count %d/%d\n",
4362 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004363 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004364 nb_tbs ? target_code_size / nb_tbs : 0,
4365 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004366 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004367 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4368 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004369 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4370 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004371 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4372 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004373 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004374 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4375 direct_jmp2_count,
4376 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004377 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004378 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4379 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4380 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004381 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004382}
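/* Illustrative note, not part of the original file: the usual caller of
   dump_exec_info() is the monitor's "info jit" command, along the lines
   of dump_exec_info((FILE *)mon, monitor_fprintf). */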
4383
Avi Kivityd39e8222012-01-01 23:35:10 +02004384/* NOTE: this function can trigger an exception */
4385/* NOTE2: the returned address is not exactly the physical address: it
4386   is a ram_addr_t offset within guest RAM, not a host pointer */
4387tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4388{
4389 int mmu_idx, page_index, pd;
4390 void *p;
4391
4392 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4393 mmu_idx = cpu_mmu_index(env1);
4394 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4395 (addr & TARGET_PAGE_MASK))) {
4396 ldub_code(addr);
4397 }
4398 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
Avi Kivity0e0df1e2012-01-02 00:32:15 +02004399 if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
Avi Kivity75c578d2012-01-02 15:40:52 +02004400 && !is_romd(pd)) {
Avi Kivityd39e8222012-01-01 23:35:10 +02004401#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4402 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4403#else
4404 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4405#endif
4406 }
4407 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4408 return qemu_ram_addr_from_host_nofail(p);
4409}
4410
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00004411/*
4412 * A helper function for the _utterly broken_ virtio device model to find out if
4413 * it's running on a big endian machine. Don't do this at home kids!
4414 */
4415bool virtio_is_big_endian(void);
4416bool virtio_is_big_endian(void)
4417{
4418#if defined(TARGET_WORDS_BIGENDIAN)
4419 return true;
4420#else
4421 return false;
4422#endif
4423}
4424
bellard61382a52003-10-27 21:22:23 +00004425#define MMUSUFFIX _cmmu
Blue Swirl39171492011-09-21 18:13:16 +00004426#undef GETPC
bellard61382a52003-10-27 21:22:23 +00004427#define GETPC() NULL
4428#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00004429#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00004430
4431#define SHIFT 0
4432#include "softmmu_template.h"
4433
4434#define SHIFT 1
4435#include "softmmu_template.h"
4436
4437#define SHIFT 2
4438#include "softmmu_template.h"
4439
4440#define SHIFT 3
4441#include "softmmu_template.h"
4442
4443#undef env
4444
4445#endif