/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                            \
    __attribute__((__section__(".gen_code")))      \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                            \
    __attribute__((aligned (16)))
#else
#define code_gen_section                            \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self-modifying code, we count the number
       of lookups we do to a given page to decide when a bitmap
       should be used */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
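
/* Illustrative sizing, not taken from any particular build: with
   L1_MAP_ADDR_SPACE_BITS = 64, TARGET_PAGE_BITS = 12 and L2_BITS = 10,
   there are 52 index bits to cover.  V_L1_BITS_REM = 52 % 10 = 2, which
   is < 4, so the L1 level absorbs the remainder: V_L1_BITS = 12,
   V_L1_SIZE = 4096 entries, and V_L1_SHIFT = 64 - 12 - 12 = 40, leaving
   four 10-bit levels below L1 (12 + 4 * 10 = 52 bits).  */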

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
CPUWriteMemoryFunc *_io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *_io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
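
/* Illustrative call (assumed values, not from the source): with 4 KiB
   host pages, map_exec(addr = 0x12345, size = 0x100) rounds the range
   out to the enclosing page boundaries and mprotects [0x12000, 0x13000). */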

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
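
/* Illustrative walk, assuming the 64-bit layout sketched above
   (V_L1_SHIFT == 40, V_L1_BITS == 12, L2_BITS == 10): bits [51:40] of
   the page index select the l1_map slot, bits [39:30], [29:20] and
   [19:10] walk the three intermediate levels, and bits [9:0] index the
   leaf PageDesc array returned by page_find_alloc().  */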

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;
        int first_index = index & ~(L2_SIZE - 1);

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
{
    PhysPageDesc *p = phys_page_find_alloc(index, 0);

    if (p) {
        return *p;
    } else {
        return (PhysPageDesc) {
            .phys_offset = IO_MEM_UNASSIGNED,
            .region_offset = index << TARGET_PAGE_BITS,
        };
    }
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
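/* In softmmu builds these are presumably no-ops because no guest
   mmap()/munmap() can race with the translator here; the user-mode
   build provides real locks instead.  */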
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change once a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

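/* The TB lists below store tagged pointers: the two low bits of each
   TranslationBlock link encode extra state.  In first_tb/page_next[]
   they record which of the TB's (at most two) pages the link belongs
   to (0 or 1), and in the jump lists the value 2 marks the list head
   (see the jmp_first initialisation in tb_link_page()).  Hence the
   recurring "(long)tb & 3" / "& ~3" unpacking in the helpers below.  */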
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
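
/* Worked example (illustrative): set_bits(tab, 5, 9) must mark bits
   5..13.  start and end fall in different bytes, so the first byte is
   ORed with 0xff << 5 = 0xe0 (bits 5..7), no full 0xff byte follows,
   and the last byte is ORed with ~(0xff << (14 & 7)), whose low byte
   is 0x3f (bits 8..13).  */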

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
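/* Since start is a multiple of len and the access sizes used are
   powers of two no larger than 8, the len bits tested below never
   straddle a byte boundary, so reading one code_bitmap byte suffices. */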
Paul Brook41c1b1c2010-03-12 16:54:58 +00001139static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
bellard9fa3e852004-01-04 18:06:42 +00001140{
1141 PageDesc *p;
1142 int offset, b;
bellard59817cc2004-02-16 22:01:13 +00001143#if 0
bellarda4193c82004-06-03 14:01:43 +00001144 if (1) {
aliguori93fcfe32009-01-15 22:34:14 +00001145 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1146 cpu_single_env->mem_io_vaddr, len,
1147 cpu_single_env->eip,
1148 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
bellard59817cc2004-02-16 22:01:13 +00001149 }
1150#endif
bellard9fa3e852004-01-04 18:06:42 +00001151 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001152 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001153 return;
1154 if (p->code_bitmap) {
1155 offset = start & ~TARGET_PAGE_MASK;
1156 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1157 if (b & ((1 << len) - 1))
1158 goto do_invalidate;
1159 } else {
1160 do_invalidate:
bellardd720b932004-04-25 17:57:43 +00001161 tb_invalidate_phys_page_range(start, start + len, 1);
bellard9fa3e852004-01-04 18:06:42 +00001162 }
1163}
1164
bellard9fa3e852004-01-04 18:06:42 +00001165#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001166static void tb_invalidate_phys_page(tb_page_addr_t addr,
bellardd720b932004-04-25 17:57:43 +00001167 unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001168{
aliguori6b917542008-11-18 19:46:41 +00001169 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001170 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001171 int n;
bellardd720b932004-04-25 17:57:43 +00001172#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001173 TranslationBlock *current_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001174 CPUState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001175 int current_tb_modified = 0;
1176 target_ulong current_pc = 0;
1177 target_ulong current_cs_base = 0;
1178 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001179#endif
bellard9fa3e852004-01-04 18:06:42 +00001180
1181 addr &= TARGET_PAGE_MASK;
1182 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001183 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001184 return;
1185 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001186#ifdef TARGET_HAS_PRECISE_SMC
1187 if (tb && pc != 0) {
1188 current_tb = tb_find_pc(pc);
1189 }
1190#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001191 while (tb != NULL) {
bellard9fa3e852004-01-04 18:06:42 +00001192 n = (long)tb & 3;
1193 tb = (TranslationBlock *)((long)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001194#ifdef TARGET_HAS_PRECISE_SMC
1195 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001196 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001197 /* If we are modifying the current TB, we must stop
1198 its execution. We could be more precise by checking
1199 that the modification is after the current PC, but it
1200 would require a specialized function to partially
1201 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001202
bellardd720b932004-04-25 17:57:43 +00001203 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001204 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001205 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1206 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001207 }
1208#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001209 tb_phys_invalidate(tb, addr);
1210 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001211 }
1212 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001213#ifdef TARGET_HAS_PRECISE_SMC
1214 if (current_tb_modified) {
1215 /* we generate a block containing just the instruction
1216 modifying the memory. It will ensure that it cannot modify
1217 itself */
bellardea1c1802004-06-14 18:56:36 +00001218 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001219 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001220 cpu_resume_from_signal(env, puc);
1221 }
1222#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001223}
bellard9fa3e852004-01-04 18:06:42 +00001224#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001225
1226/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001227static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001228 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001229{
1230 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001231#ifndef CONFIG_USER_ONLY
1232 bool page_already_protected;
1233#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001234
bellard9fa3e852004-01-04 18:06:42 +00001235 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001236 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001237 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001238#ifndef CONFIG_USER_ONLY
1239 page_already_protected = p->first_tb != NULL;
1240#endif
bellard9fa3e852004-01-04 18:06:42 +00001241 p->first_tb = (TranslationBlock *)((long)tb | n);
1242 invalidate_page_bitmap(p);
1243
bellard107db442004-06-22 18:48:46 +00001244#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001245
bellard9fa3e852004-01-04 18:06:42 +00001246#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001247 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001248 target_ulong addr;
1249 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001250 int prot;
1251
bellardfd6ce8f2003-05-14 19:00:11 +00001252 /* force the host page as non writable (writes will have a
1253 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001254 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001255 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001256 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1257 addr += TARGET_PAGE_SIZE) {
1258
1259 p2 = page_find (addr >> TARGET_PAGE_BITS);
1260 if (!p2)
1261 continue;
1262 prot |= p2->flags;
1263 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001264 }
ths5fafdf22007-09-16 21:08:06 +00001265 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001266 (prot & PAGE_BITS) & ~PAGE_WRITE);
1267#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001268 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001269 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001270#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001271 }
bellard9fa3e852004-01-04 18:06:42 +00001272#else
1273 /* if some code is already present, then the pages are already
1274 protected. So we handle the case where only the first TB is
1275 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001276 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001277 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001278 }
1279#endif
bellardd720b932004-04-25 17:57:43 +00001280
1281#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001282}
1283
bellard9fa3e852004-01-04 18:06:42 +00001284/* add a new TB and link it to the physical page tables. phys_page2 is
1285 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001286void tb_link_page(TranslationBlock *tb,
1287 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001288{
bellard9fa3e852004-01-04 18:06:42 +00001289 unsigned int h;
1290 TranslationBlock **ptb;
1291
pbrookc8a706f2008-06-02 16:16:42 +00001292 /* Grab the mmap lock to stop another thread invalidating this TB
1293 before we are done. */
1294 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001295 /* add in the physical hash table */
1296 h = tb_phys_hash_func(phys_pc);
1297 ptb = &tb_phys_hash[h];
1298 tb->phys_hash_next = *ptb;
1299 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001300
1301 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001302 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1303 if (phys_page2 != -1)
1304 tb_alloc_page(tb, 1, phys_page2);
1305 else
1306 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001307
bellardd4e81642003-05-25 16:46:15 +00001308 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1309 tb->jmp_next[0] = NULL;
1310 tb->jmp_next[1] = NULL;
1311
1312 /* init original jump addresses */
1313 if (tb->tb_next_offset[0] != 0xffff)
1314 tb_reset_jump(tb, 0);
1315 if (tb->tb_next_offset[1] != 0xffff)
1316 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001317
1318#ifdef DEBUG_TB_CHECK
1319 tb_page_check();
1320#endif
pbrookc8a706f2008-06-02 16:16:42 +00001321 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001322}
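
/* The physical-hash insertion above is a plain intrusive push onto a
   singly linked bucket chain; a minimal standalone equivalent, purely
   for illustration: */
struct demo_node { struct demo_node *next; };

static void demo_chain_push(struct demo_node **bucket, struct demo_node *n)
{
    n->next = *bucket;   /* new node points at the old bucket head */
    *bucket = n;         /* the bucket now starts with the new node */
}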
1323
bellarda513fe12003-05-27 23:29:48 +00001324/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1325 tb[1].tc_ptr. Return NULL if not found */
1326TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1327{
1328 int m_min, m_max, m;
1329 unsigned long v;
1330 TranslationBlock *tb;
1331
1332 if (nb_tbs <= 0)
1333 return NULL;
1334 if (tc_ptr < (unsigned long)code_gen_buffer ||
1335 tc_ptr >= (unsigned long)code_gen_ptr)
1336 return NULL;
1337 /* binary search (cf Knuth) */
1338 m_min = 0;
1339 m_max = nb_tbs - 1;
1340 while (m_min <= m_max) {
1341 m = (m_min + m_max) >> 1;
1342 tb = &tbs[m];
1343 v = (unsigned long)tb->tc_ptr;
1344 if (v == tc_ptr)
1345 return tb;
1346 else if (tc_ptr < v) {
1347 m_max = m - 1;
1348 } else {
1349 m_min = m + 1;
1350 }
ths5fafdf22007-09-16 21:08:06 +00001351 }
bellarda513fe12003-05-27 23:29:48 +00001352 return &tbs[m_max];
1353}
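
/* Standalone illustration of the search variant tb_find_pc() uses: on a
   sorted array, return the index of the greatest element that is <= key
   (when the loop misses, m_max is left pointing at it).  This assumes
   key >= a[0], which the caller above guarantees by first range-checking
   tc_ptr against the code buffer. */
static int demo_find_le(const unsigned long *a, int n, unsigned long key)
{
    int lo = 0, hi = n - 1;

    while (lo <= hi) {
        int mid = (lo + hi) >> 1;
        if (a[mid] == key) {
            return mid;
        } else if (key < a[mid]) {
            hi = mid - 1;
        } else {
            lo = mid + 1;
        }
    }
    return hi;   /* greatest index with a[hi] <= key */
}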
bellard75012672003-06-21 13:11:07 +00001354
bellardea041c02003-06-25 16:16:50 +00001355static void tb_reset_jump_recursive(TranslationBlock *tb);
1356
1357static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1358{
1359 TranslationBlock *tb1, *tb_next, **ptb;
1360 unsigned int n1;
1361
1362 tb1 = tb->jmp_next[n];
1363 if (tb1 != NULL) {
1364 /* find head of list */
1365 for(;;) {
1366 n1 = (long)tb1 & 3;
1367 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1368 if (n1 == 2)
1369 break;
1370 tb1 = tb1->jmp_next[n1];
1371 }
 1372        /* we are now sure that tb jumps to tb1 */
1373 tb_next = tb1;
1374
1375 /* remove tb from the jmp_first list */
1376 ptb = &tb_next->jmp_first;
1377 for(;;) {
1378 tb1 = *ptb;
1379 n1 = (long)tb1 & 3;
1380 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1381 if (n1 == n && tb1 == tb)
1382 break;
1383 ptb = &tb1->jmp_next[n1];
1384 }
1385 *ptb = tb->jmp_next[n];
1386 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001387
bellardea041c02003-06-25 16:16:50 +00001388 /* suppress the jump to next tb in generated code */
1389 tb_reset_jump(tb, n);
1390
bellard01243112004-01-04 15:48:17 +00001391        /* suppress jumps in the tb we could have jumped to */
bellardea041c02003-06-25 16:16:50 +00001392 tb_reset_jump_recursive(tb_next);
1393 }
1394}
1395
1396static void tb_reset_jump_recursive(TranslationBlock *tb)
1397{
1398 tb_reset_jump_recursive2(tb, 0);
1399 tb_reset_jump_recursive2(tb, 1);
1400}
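
/* Hedged sketch of the tagged-pointer encoding that jmp_first/jmp_next
   rely on above: TranslationBlock pointers are at least 4-byte aligned,
   so the two low bits are free to carry a tag (0/1 = which jump slot
   chains here, 2 = list head).  Illustrative only; it uses uintptr_t
   where the code above casts through long. */
#include <assert.h>
#include <stdint.h>

static inline void *demo_tag_ptr(void *p, unsigned tag)
{
    assert(((uintptr_t)p & 3) == 0 && tag < 4);  /* alignment frees 2 bits */
    return (void *)((uintptr_t)p | tag);
}

static inline unsigned demo_ptr_tag(void *p)
{
    return (uintptr_t)p & 3;
}

static inline void *demo_ptr_strip(void *p)
{
    return (void *)((uintptr_t)p & ~(uintptr_t)3);
}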
1401
bellard1fddef42005-04-17 19:16:13 +00001402#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001403#if defined(CONFIG_USER_ONLY)
1404static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1405{
1406 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1407}
1408#else
bellardd720b932004-04-25 17:57:43 +00001409static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1410{
Anthony Liguoric227f092009-10-01 16:12:16 -05001411 target_phys_addr_t addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00001412 target_ulong pd;
Anthony Liguoric227f092009-10-01 16:12:16 -05001413 ram_addr_t ram_addr;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001414 PhysPageDesc p;
bellardd720b932004-04-25 17:57:43 +00001415
pbrookc2f07f82006-04-08 17:14:56 +00001416 addr = cpu_get_phys_page_debug(env, pc);
1417 p = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02001418 pd = p.phys_offset;
pbrookc2f07f82006-04-08 17:14:56 +00001419 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001420 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001421}
bellardc27004e2005-01-03 23:35:10 +00001422#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001423#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001424
Paul Brookc527ee82010-03-01 03:31:14 +00001425#if defined(CONFIG_USER_ONLY)
1426void cpu_watchpoint_remove_all(CPUState *env, int mask)
1428{
1429}
1430
1431int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1432 int flags, CPUWatchpoint **watchpoint)
1433{
1434 return -ENOSYS;
1435}
1436#else
pbrook6658ffb2007-03-16 23:58:11 +00001437/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001438int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1439 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001440{
aliguorib4051332008-11-18 20:14:20 +00001441 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001442 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001443
aliguorib4051332008-11-18 20:14:20 +00001444 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1445 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1446 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1447 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1448 return -EINVAL;
1449 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001450 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001451
aliguoria1d1bb32008-11-18 20:07:32 +00001452 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001453 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001454 wp->flags = flags;
1455
aliguori2dc9f412008-11-18 20:56:59 +00001456 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001457 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001458 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001459 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001460 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001461
pbrook6658ffb2007-03-16 23:58:11 +00001462 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001463
1464 if (watchpoint)
1465 *watchpoint = wp;
1466 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001467}
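
/* The sanity check above, restated standalone: a watchpoint length must
   be a power of two between 1 and 8, and the address must be aligned to
   it (no address bits set below the length).  Hedged illustration only. */
#include <stdint.h>

static int demo_watchpoint_args_valid(uint64_t addr, uint64_t len)
{
    uint64_t len_mask = ~(len - 1);           /* e.g. len=4 -> ...fffc */
    int pow2_1_to_8 = (len == 1 || len == 2 || len == 4 || len == 8);

    return pow2_1_to_8 && (addr & ~len_mask) == 0;
}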
1468
aliguoria1d1bb32008-11-18 20:07:32 +00001469/* Remove a specific watchpoint. */
1470int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1471 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001472{
aliguorib4051332008-11-18 20:14:20 +00001473 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001474 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001475
Blue Swirl72cf2d42009-09-12 07:36:22 +00001476 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001477 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001478 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001479 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001480 return 0;
1481 }
1482 }
aliguoria1d1bb32008-11-18 20:07:32 +00001483 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001484}
1485
aliguoria1d1bb32008-11-18 20:07:32 +00001486/* Remove a specific watchpoint by reference. */
1487void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1488{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001489 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001490
aliguoria1d1bb32008-11-18 20:07:32 +00001491 tlb_flush_page(env, watchpoint->vaddr);
1492
Anthony Liguori7267c092011-08-20 22:09:37 -05001493 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001494}
1495
aliguoria1d1bb32008-11-18 20:07:32 +00001496/* Remove all matching watchpoints. */
1497void cpu_watchpoint_remove_all(CPUState *env, int mask)
1498{
aliguoric0ce9982008-11-25 22:13:57 +00001499 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001500
Blue Swirl72cf2d42009-09-12 07:36:22 +00001501 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001502 if (wp->flags & mask)
1503 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001504 }
aliguoria1d1bb32008-11-18 20:07:32 +00001505}
Paul Brookc527ee82010-03-01 03:31:14 +00001506#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001507
1508/* Add a breakpoint. */
1509int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1510 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001511{
bellard1fddef42005-04-17 19:16:13 +00001512#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001513 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001514
Anthony Liguori7267c092011-08-20 22:09:37 -05001515 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001516
1517 bp->pc = pc;
1518 bp->flags = flags;
1519
aliguori2dc9f412008-11-18 20:56:59 +00001520 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001521 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001522 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001523 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001524 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001525
1526 breakpoint_invalidate(env, pc);
1527
1528 if (breakpoint)
1529 *breakpoint = bp;
1530 return 0;
1531#else
1532 return -ENOSYS;
1533#endif
1534}
1535
1536/* Remove a specific breakpoint. */
1537int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1538{
1539#if defined(TARGET_HAS_ICE)
1540 CPUBreakpoint *bp;
1541
Blue Swirl72cf2d42009-09-12 07:36:22 +00001542 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001543 if (bp->pc == pc && bp->flags == flags) {
1544 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001545 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001546 }
bellard4c3a88a2003-07-26 12:06:08 +00001547 }
aliguoria1d1bb32008-11-18 20:07:32 +00001548 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001549#else
aliguoria1d1bb32008-11-18 20:07:32 +00001550 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001551#endif
1552}
1553
aliguoria1d1bb32008-11-18 20:07:32 +00001554/* Remove a specific breakpoint by reference. */
1555void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001556{
bellard1fddef42005-04-17 19:16:13 +00001557#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001558 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001559
aliguoria1d1bb32008-11-18 20:07:32 +00001560 breakpoint_invalidate(env, breakpoint->pc);
1561
Anthony Liguori7267c092011-08-20 22:09:37 -05001562 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001563#endif
1564}
1565
1566/* Remove all matching breakpoints. */
1567void cpu_breakpoint_remove_all(CPUState *env, int mask)
1568{
1569#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001570 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001571
Blue Swirl72cf2d42009-09-12 07:36:22 +00001572 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001573 if (bp->flags & mask)
1574 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001575 }
bellard4c3a88a2003-07-26 12:06:08 +00001576#endif
1577}
1578
bellardc33a3462003-07-29 20:50:33 +00001579/* enable or disable single-step mode. EXCP_DEBUG is returned by the
1580 CPU loop after each instruction */
1581void cpu_single_step(CPUState *env, int enabled)
1582{
bellard1fddef42005-04-17 19:16:13 +00001583#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001584 if (env->singlestep_enabled != enabled) {
1585 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001586 if (kvm_enabled())
1587 kvm_update_guest_debug(env, 0);
1588 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001589 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001590 /* XXX: only flush what is necessary */
1591 tb_flush(env);
1592 }
bellardc33a3462003-07-29 20:50:33 +00001593 }
1594#endif
1595}
1596
bellard34865132003-10-05 14:28:56 +00001597/* enable or disable low-level logging */
1598void cpu_set_log(int log_flags)
1599{
1600 loglevel = log_flags;
1601 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001602 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001603 if (!logfile) {
1604 perror(logfilename);
1605 _exit(1);
1606 }
bellard9fa3e852004-01-04 18:06:42 +00001607#if !defined(CONFIG_SOFTMMU)
1608 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1609 {
blueswir1b55266b2008-09-20 08:07:15 +00001610 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001611 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1612 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001613#elif defined(_WIN32)
1614 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1615 setvbuf(logfile, NULL, _IONBF, 0);
1616#else
bellard34865132003-10-05 14:28:56 +00001617 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001618#endif
pbrooke735b912007-06-30 13:53:24 +00001619 log_append = 1;
1620 }
1621 if (!loglevel && logfile) {
1622 fclose(logfile);
1623 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001624 }
1625}
1626
1627void cpu_set_log_filename(const char *filename)
1628{
1629 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001630 if (logfile) {
1631 fclose(logfile);
1632 logfile = NULL;
1633 }
1634 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001635}
bellardc33a3462003-07-29 20:50:33 +00001636
aurel323098dba2009-03-07 21:28:24 +00001637static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001638{
pbrookd5975362008-06-07 20:50:51 +00001639 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1640 problem and hope the cpu will stop of its own accord. For userspace
1641 emulation this often isn't actually as bad as it sounds. Often
1642 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001643 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001644 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001645
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001646 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001647 tb = env->current_tb;
1648 /* if the cpu is currently executing code, we must unlink it and
1649 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001650 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001651 env->current_tb = NULL;
1652 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001653 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001654 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001655}
1656
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001657#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001658/* mask must never be zero, except for A20 change call */
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001659static void tcg_handle_interrupt(CPUState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001660{
1661 int old_mask;
1662
1663 old_mask = env->interrupt_request;
1664 env->interrupt_request |= mask;
1665
aliguori8edac962009-04-24 18:03:45 +00001666 /*
1667 * If called from iothread context, wake the target cpu in
1668 * case its halted.
1669 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001670 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001671 qemu_cpu_kick(env);
1672 return;
1673 }
aliguori8edac962009-04-24 18:03:45 +00001674
pbrook2e70f6e2008-06-29 01:03:05 +00001675 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001676 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001677 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001678 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001679 cpu_abort(env, "Raised interrupt while not in I/O function");
1680 }
pbrook2e70f6e2008-06-29 01:03:05 +00001681 } else {
aurel323098dba2009-03-07 21:28:24 +00001682 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001683 }
1684}
1685
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001686CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1687
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001688#else /* CONFIG_USER_ONLY */
1689
1690void cpu_interrupt(CPUState *env, int mask)
1691{
1692 env->interrupt_request |= mask;
1693 cpu_unlink_tb(env);
1694}
1695#endif /* CONFIG_USER_ONLY */
1696
bellardb54ad042004-05-20 13:42:52 +00001697void cpu_reset_interrupt(CPUState *env, int mask)
1698{
1699 env->interrupt_request &= ~mask;
1700}
1701
aurel323098dba2009-03-07 21:28:24 +00001702void cpu_exit(CPUState *env)
1703{
1704 env->exit_request = 1;
1705 cpu_unlink_tb(env);
1706}
1707
blueswir1c7cd6a32008-10-02 18:27:46 +00001708const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001709 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001710 "show generated host assembly code for each compiled TB" },
1711 { CPU_LOG_TB_IN_ASM, "in_asm",
1712 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001713 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001714 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001715 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001716 "show micro ops "
1717#ifdef TARGET_I386
1718 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001719#endif
blueswir1e01a1152008-03-14 17:37:11 +00001720 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001721 { CPU_LOG_INT, "int",
1722 "show interrupts/exceptions in short format" },
1723 { CPU_LOG_EXEC, "exec",
1724 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001725 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001726 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001727#ifdef TARGET_I386
1728 { CPU_LOG_PCALL, "pcall",
1729 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001730 { CPU_LOG_RESET, "cpu_reset",
1731 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001732#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001733#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001734 { CPU_LOG_IOPORT, "ioport",
1735 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001736#endif
bellardf193c792004-03-21 17:06:25 +00001737 { 0, NULL, NULL },
1738};
1739
1740static int cmp1(const char *s1, int n, const char *s2)
1741{
1742 if (strlen(s2) != n)
1743 return 0;
1744 return memcmp(s1, s2, n) == 0;
1745}
ths3b46e622007-09-17 08:09:54 +00001746
bellardf193c792004-03-21 17:06:25 +00001747/* takes a comma separated list of log masks. Return 0 if error. */
1748int cpu_str_to_log_mask(const char *str)
1749{
blueswir1c7cd6a32008-10-02 18:27:46 +00001750 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001751 int mask;
1752 const char *p, *p1;
1753
1754 p = str;
1755 mask = 0;
1756 for(;;) {
1757 p1 = strchr(p, ',');
1758 if (!p1)
1759 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001760 if(cmp1(p,p1-p,"all")) {
1761 for(item = cpu_log_items; item->mask != 0; item++) {
1762 mask |= item->mask;
1763 }
1764 } else {
1765 for(item = cpu_log_items; item->mask != 0; item++) {
1766 if (cmp1(p, p1 - p, item->name))
1767 goto found;
1768 }
1769 return 0;
bellardf193c792004-03-21 17:06:25 +00001770 }
bellardf193c792004-03-21 17:06:25 +00001771 found:
1772 mask |= item->mask;
1773 if (*p1 != ',')
1774 break;
1775 p = p1 + 1;
1776 }
1777 return mask;
1778}
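
/* Hedged usage sketch for the parser above, as a command-line option
   handler might call it; demo_handle_d_option() is an illustrative name.
   A zero return means some item in the list was unknown. */
static void demo_handle_d_option(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);   /* e.g. "in_asm,int" */

    if (mask == 0) {
        fprintf(stderr, "unknown log item in '%s'\n", arg);
        return;
    }
    cpu_set_log(mask);
}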
bellardea041c02003-06-25 16:16:50 +00001779
bellard75012672003-06-21 13:11:07 +00001780void cpu_abort(CPUState *env, const char *fmt, ...)
1781{
1782 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001783 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001784
1785 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001786 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001787 fprintf(stderr, "qemu: fatal: ");
1788 vfprintf(stderr, fmt, ap);
1789 fprintf(stderr, "\n");
1790#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001791 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1792#else
1793 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001794#endif
aliguori93fcfe32009-01-15 22:34:14 +00001795 if (qemu_log_enabled()) {
1796 qemu_log("qemu: fatal: ");
1797 qemu_log_vprintf(fmt, ap2);
1798 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001799#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001800 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001801#else
aliguori93fcfe32009-01-15 22:34:14 +00001802 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001803#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001804 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001805 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001806 }
pbrook493ae1f2007-11-23 16:53:59 +00001807 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001808 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001809#if defined(CONFIG_USER_ONLY)
1810 {
1811 struct sigaction act;
1812 sigfillset(&act.sa_mask);
1813 act.sa_handler = SIG_DFL;
1814 sigaction(SIGABRT, &act, NULL);
1815 }
1816#endif
bellard75012672003-06-21 13:11:07 +00001817 abort();
1818}
1819
thsc5be9f02007-02-28 20:20:53 +00001820CPUState *cpu_copy(CPUState *env)
1821{
ths01ba9812007-12-09 02:22:57 +00001822 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001823 CPUState *next_cpu = new_env->next_cpu;
1824 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001825#if defined(TARGET_HAS_ICE)
1826 CPUBreakpoint *bp;
1827 CPUWatchpoint *wp;
1828#endif
1829
thsc5be9f02007-02-28 20:20:53 +00001830 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001831
1832 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001833 new_env->next_cpu = next_cpu;
1834 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001835
1836 /* Clone all break/watchpoints.
1837 Note: Once we support ptrace with hw-debug register access, make sure
1838 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001839 QTAILQ_INIT(&env->breakpoints);
1840 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001841#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001842 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001843 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1844 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001845 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001846 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1847 wp->flags, NULL);
1848 }
1849#endif
1850
thsc5be9f02007-02-28 20:20:53 +00001851 return new_env;
1852}
1853
bellard01243112004-01-04 15:48:17 +00001854#if !defined(CONFIG_USER_ONLY)
1855
edgar_igl5c751e92008-05-06 08:44:21 +00001856static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1857{
1858 unsigned int i;
1859
1860 /* Discard jump cache entries for any tb which might potentially
1861 overlap the flushed page. */
1862 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1863 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001864 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001865
1866 i = tb_jmp_cache_hash_page(addr);
1867 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001868 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001869}
1870
Igor Kovalenko08738982009-07-12 02:15:40 +04001871static CPUTLBEntry s_cputlb_empty_entry = {
1872 .addr_read = -1,
1873 .addr_write = -1,
1874 .addr_code = -1,
1875 .addend = -1,
1876};
1877
bellardee8b7022004-02-03 23:35:10 +00001878/* NOTE: if flush_global is true, also flush global entries (not
1879 implemented yet) */
1880void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001881{
bellard33417e72003-08-10 21:47:01 +00001882 int i;
bellard01243112004-01-04 15:48:17 +00001883
bellard9fa3e852004-01-04 18:06:42 +00001884#if defined(DEBUG_TLB)
1885 printf("tlb_flush:\n");
1886#endif
bellard01243112004-01-04 15:48:17 +00001887 /* must reset current TB so that interrupts cannot modify the
1888 links while we are modifying them */
1889 env->current_tb = NULL;
1890
bellard33417e72003-08-10 21:47:01 +00001891 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001892 int mmu_idx;
1893 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001894 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001895 }
bellard33417e72003-08-10 21:47:01 +00001896 }
bellard9fa3e852004-01-04 18:06:42 +00001897
bellard8a40a182005-11-20 10:35:40 +00001898 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001899
Paul Brookd4c430a2010-03-17 02:14:28 +00001900 env->tlb_flush_addr = -1;
1901 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00001902 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00001903}
1904
bellard274da6b2004-05-20 21:56:27 +00001905static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001906{
ths5fafdf22007-09-16 21:08:06 +00001907 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001908 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001909 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001910 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001911 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001912 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001913 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001914 }
bellard61382a52003-10-27 21:22:23 +00001915}
1916
bellard2e126692004-04-25 21:28:44 +00001917void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00001918{
bellard8a40a182005-11-20 10:35:40 +00001919 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001920 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00001921
bellard9fa3e852004-01-04 18:06:42 +00001922#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00001923 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00001924#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00001925 /* Check if we need to flush due to large pages. */
1926 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1927#if defined(DEBUG_TLB)
1928 printf("tlb_flush_page: forced full flush ("
1929 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1930 env->tlb_flush_addr, env->tlb_flush_mask);
1931#endif
1932 tlb_flush(env, 1);
1933 return;
1934 }
bellard01243112004-01-04 15:48:17 +00001935 /* must reset current TB so that interrupts cannot modify the
1936 links while we are modifying them */
1937 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00001938
bellard61382a52003-10-27 21:22:23 +00001939 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00001940 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001941 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1942 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00001943
edgar_igl5c751e92008-05-06 08:44:21 +00001944 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00001945}
1946
bellard9fa3e852004-01-04 18:06:42 +00001947/* update the TLBs so that writes to code in the virtual page 'addr'
1948 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05001949static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00001950{
ths5fafdf22007-09-16 21:08:06 +00001951 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00001952 ram_addr + TARGET_PAGE_SIZE,
1953 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00001954}
1955
bellard9fa3e852004-01-04 18:06:42 +00001956/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00001957 tested for self modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05001958static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00001959 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00001960{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001961 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00001962}
1963
ths5fafdf22007-09-16 21:08:06 +00001964static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00001965 unsigned long start, unsigned long length)
1966{
1967 unsigned long addr;
bellard84b7b8e2005-11-28 21:19:04 +00001968 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1969 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00001970 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00001971 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00001972 }
1973 }
1974}
1975
pbrook5579c7f2009-04-11 14:47:08 +00001976/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05001977void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00001978 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00001979{
1980 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00001981 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001982 int i;
bellard1ccde1c2004-02-06 19:46:14 +00001983
1984 start &= TARGET_PAGE_MASK;
1985 end = TARGET_PAGE_ALIGN(end);
1986
1987 length = end - start;
1988 if (length == 0)
1989 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001990 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001991
bellard1ccde1c2004-02-06 19:46:14 +00001992 /* we modify the TLB cache so that the dirty bit will be set again
1993 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001994 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02001995 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00001996 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001997 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00001998 != (end - 1) - start) {
1999 abort();
2000 }
2001
bellard6a00d602005-11-21 23:25:50 +00002002 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002003 int mmu_idx;
2004 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2005 for(i = 0; i < CPU_TLB_SIZE; i++)
2006 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2007 start1, length);
2008 }
bellard6a00d602005-11-21 23:25:50 +00002009 }
bellard1ccde1c2004-02-06 19:46:14 +00002010}
2011
aliguori74576192008-10-06 14:02:03 +00002012int cpu_physical_memory_set_dirty_tracking(int enable)
2013{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002014 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002015 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002016 return ret;
aliguori74576192008-10-06 14:02:03 +00002017}
2018
bellard3a7d9292005-08-21 09:26:42 +00002019static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2020{
Anthony Liguoric227f092009-10-01 16:12:16 -05002021 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002022 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002023
bellard84b7b8e2005-11-28 21:19:04 +00002024 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
pbrook5579c7f2009-04-11 14:47:08 +00002025 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2026 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002027 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002028 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002029 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002030 }
2031 }
2032}
2033
2034/* update the TLB according to the current state of the dirty bits */
2035void cpu_tlb_update_dirty(CPUState *env)
2036{
2037 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002038 int mmu_idx;
2039 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2040 for(i = 0; i < CPU_TLB_SIZE; i++)
2041 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2042 }
bellard3a7d9292005-08-21 09:26:42 +00002043}
2044
pbrook0f459d12008-06-09 00:20:13 +00002045static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002046{
pbrook0f459d12008-06-09 00:20:13 +00002047 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2048 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002049}
2050
pbrook0f459d12008-06-09 00:20:13 +00002051/* update the TLB corresponding to virtual page vaddr
2052 so that it is no longer dirty */
2053static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002054{
bellard1ccde1c2004-02-06 19:46:14 +00002055 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002056 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002057
pbrook0f459d12008-06-09 00:20:13 +00002058 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002059 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002060 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2061 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002062}
2063
Paul Brookd4c430a2010-03-17 02:14:28 +00002064/* Our TLB does not support large pages, so remember the area covered by
2065 large pages and trigger a full TLB flush if these are invalidated. */
2066static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2067 target_ulong size)
2068{
2069 target_ulong mask = ~(size - 1);
2070
2071 if (env->tlb_flush_addr == (target_ulong)-1) {
2072 env->tlb_flush_addr = vaddr & mask;
2073 env->tlb_flush_mask = mask;
2074 return;
2075 }
2076 /* Extend the existing region to include the new page.
2077 This is a compromise between unnecessary flushes and the cost
2078 of maintaining a full variable size TLB. */
2079 mask &= env->tlb_flush_mask;
2080 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2081 mask <<= 1;
2082 }
2083 env->tlb_flush_addr &= mask;
2084 env->tlb_flush_mask = mask;
2085}
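
/* Worked example of the widening loop above, with illustrative values.
   Existing region: tlb_flush_addr=0x40000000, tlb_flush_mask=~0xffff
   (one 64KB page); a new 64KB page arrives at vaddr=0x40030000:
       mask = ~0xffff
       (0x40000000 ^ 0x40030000) & ~0xffff  = 0x30000 != 0  -> mask <<= 1
       0x30000 & ~0x1ffff = 0x20000 != 0                    -> mask <<= 1
       0x30000 & ~0x3ffff = 0                               -> done
   Result: tlb_flush_addr=0x40000000, tlb_flush_mask=~0x3ffff, i.e. one
   256KB region whose invalidation forces a full flush. */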
2086
Avi Kivity1d393fa2012-01-01 21:15:42 +02002087static bool is_ram_rom(ram_addr_t pd)
2088{
2089 pd &= ~TARGET_PAGE_MASK;
2090 return pd == IO_MEM_RAM || pd == IO_MEM_ROM;
2091}
2092
2093static bool is_ram_rom_romd(ram_addr_t pd)
2094{
2095 return is_ram_rom(pd) || (pd & IO_MEM_ROMD);
2096}
2097
Paul Brookd4c430a2010-03-17 02:14:28 +00002098/* Add a new TLB entry. At most one entry for a given virtual address
 2099   is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2100 supplied size is only used by tlb_flush_page. */
2101void tlb_set_page(CPUState *env, target_ulong vaddr,
2102 target_phys_addr_t paddr, int prot,
2103 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002104{
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002105 PhysPageDesc p;
bellard4f2ac232004-04-26 19:44:02 +00002106 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00002107 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002108 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002109 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002110 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002111 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002112 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002113 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002114
Paul Brookd4c430a2010-03-17 02:14:28 +00002115 assert(size >= TARGET_PAGE_SIZE);
2116 if (size != TARGET_PAGE_SIZE) {
2117 tlb_add_large_page(env, vaddr, size);
2118 }
bellard92e873b2004-05-21 14:52:29 +00002119 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002120 pd = p.phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00002121#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002122 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2123 " prot=%x idx=%d pd=0x%08lx\n",
2124 vaddr, paddr, prot, mmu_idx, pd);
bellard9fa3e852004-01-04 18:06:42 +00002125#endif
2126
pbrook0f459d12008-06-09 00:20:13 +00002127 address = vaddr;
Avi Kivity1d393fa2012-01-01 21:15:42 +02002128 if (!is_ram_rom_romd(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002129 /* IO memory case (romd handled later) */
2130 address |= TLB_MMIO;
2131 }
pbrook5579c7f2009-04-11 14:47:08 +00002132 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
Avi Kivity1d393fa2012-01-01 21:15:42 +02002133 if (is_ram_rom(pd)) {
pbrook0f459d12008-06-09 00:20:13 +00002134 /* Normal RAM. */
2135 iotlb = pd & TARGET_PAGE_MASK;
2136 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2137 iotlb |= IO_MEM_NOTDIRTY;
2138 else
2139 iotlb |= IO_MEM_ROM;
2140 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002141 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002142 It would be nice to pass an offset from the base address
2143 of that region. This would avoid having to special case RAM,
2144 and avoid full address decoding in every device.
2145 We can't use the high bits of pd for this because
2146 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002147 iotlb = (pd & ~TARGET_PAGE_MASK);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02002148 iotlb += p.region_offset;
pbrook0f459d12008-06-09 00:20:13 +00002149 }
pbrook6658ffb2007-03-16 23:58:11 +00002150
pbrook0f459d12008-06-09 00:20:13 +00002151 code_address = address;
2152 /* Make accesses to pages with watchpoints go via the
2153 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002154 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002155 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002156 /* Avoid trapping reads of pages with a write breakpoint. */
2157 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2158 iotlb = io_mem_watch + paddr;
2159 address |= TLB_MMIO;
2160 break;
2161 }
pbrook6658ffb2007-03-16 23:58:11 +00002162 }
pbrook0f459d12008-06-09 00:20:13 +00002163 }
balrogd79acba2007-06-26 20:01:13 +00002164
pbrook0f459d12008-06-09 00:20:13 +00002165 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2166 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2167 te = &env->tlb_table[mmu_idx][index];
2168 te->addend = addend - vaddr;
2169 if (prot & PAGE_READ) {
2170 te->addr_read = address;
2171 } else {
2172 te->addr_read = -1;
2173 }
edgar_igl5c751e92008-05-06 08:44:21 +00002174
pbrook0f459d12008-06-09 00:20:13 +00002175 if (prot & PAGE_EXEC) {
2176 te->addr_code = code_address;
2177 } else {
2178 te->addr_code = -1;
2179 }
2180 if (prot & PAGE_WRITE) {
2181 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2182 (pd & IO_MEM_ROMD)) {
2183 /* Write access calls the I/O callback. */
2184 te->addr_write = address | TLB_MMIO;
2185 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2186 !cpu_physical_memory_is_dirty(pd)) {
2187 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002188 } else {
pbrook0f459d12008-06-09 00:20:13 +00002189 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002190 }
pbrook0f459d12008-06-09 00:20:13 +00002191 } else {
2192 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002193 }
bellard9fa3e852004-01-04 18:06:42 +00002194}
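
/* Hedged companion sketch to tlb_set_page(): how a slow-path lookup
   derives the slot and compares the tag.  CPU_TLB_SIZE is a power of
   two, so the index is just the low bits of the page number; the
   comparison mirrors the one in tlb_flush_entry() above and in the
   softmmu load/store templates.  Illustrative only. */
static inline int demo_tlb_read_hit(CPUState *env, int mmu_idx,
                                    target_ulong vaddr)
{
    int index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *te = &env->tlb_table[mmu_idx][index];

    return (vaddr & TARGET_PAGE_MASK) ==
           (te->addr_read & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}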
2195
bellard01243112004-01-04 15:48:17 +00002196#else
2197
bellardee8b7022004-02-03 23:35:10 +00002198void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002199{
2200}
2201
bellard2e126692004-04-25 21:28:44 +00002202void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002203{
2204}
2205
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002206/*
2207 * Walks guest process memory "regions" one by one
2208 * and calls callback function 'fn' for each region.
2209 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002210
2211struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002212{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002213 walk_memory_regions_fn fn;
2214 void *priv;
2215 unsigned long start;
2216 int prot;
2217};
bellard9fa3e852004-01-04 18:06:42 +00002218
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002219static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002220 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002221{
2222 if (data->start != -1ul) {
2223 int rc = data->fn(data->priv, data->start, end, data->prot);
2224 if (rc != 0) {
2225 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002226 }
bellard33417e72003-08-10 21:47:01 +00002227 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002228
2229 data->start = (new_prot ? end : -1ul);
2230 data->prot = new_prot;
2231
2232 return 0;
2233}
2234
2235static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002236 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002237{
Paul Brookb480d9b2010-03-12 23:23:29 +00002238 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002239 int i, rc;
2240
2241 if (*lp == NULL) {
2242 return walk_memory_regions_end(data, base, 0);
2243 }
2244
2245 if (level == 0) {
2246 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002247 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002248 int prot = pd[i].flags;
2249
2250 pa = base | (i << TARGET_PAGE_BITS);
2251 if (prot != data->prot) {
2252 rc = walk_memory_regions_end(data, pa, prot);
2253 if (rc != 0) {
2254 return rc;
2255 }
2256 }
2257 }
2258 } else {
2259 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002260 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002261 pa = base | ((abi_ulong)i <<
2262 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002263 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2264 if (rc != 0) {
2265 return rc;
2266 }
2267 }
2268 }
2269
2270 return 0;
2271}
2272
2273int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2274{
2275 struct walk_memory_regions_data data;
2276 unsigned long i;
2277
2278 data.fn = fn;
2279 data.priv = priv;
2280 data.start = -1ul;
2281 data.prot = 0;
2282
2283 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002284 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002285 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2286 if (rc != 0) {
2287 return rc;
2288 }
2289 }
2290
2291 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002292}
2293
Paul Brookb480d9b2010-03-12 23:23:29 +00002294static int dump_region(void *priv, abi_ulong start,
2295 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002296{
2297 FILE *f = (FILE *)priv;
2298
Paul Brookb480d9b2010-03-12 23:23:29 +00002299 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2300 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002301 start, end, end - start,
2302 ((prot & PAGE_READ) ? 'r' : '-'),
2303 ((prot & PAGE_WRITE) ? 'w' : '-'),
2304 ((prot & PAGE_EXEC) ? 'x' : '-'));
2305
2306 return (0);
2307}
2308
2309/* dump memory mappings */
2310void page_dump(FILE *f)
2311{
2312 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2313 "start", "end", "size", "prot");
2314 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002315}
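
/* Hedged sketch of another walk_memory_regions() consumer, following the
   same callback shape as dump_region() above: total up the executable
   bytes mapped in the guest.  demo_* names are illustrative. */
static int demo_count_exec(void *priv, abi_ulong start,
                           abi_ulong end, unsigned long prot)
{
    if (prot & PAGE_EXEC) {
        *(abi_ulong *)priv += end - start;
    }
    return 0;   /* a non-zero return would abort the walk */
}

static abi_ulong demo_exec_bytes(void)
{
    abi_ulong total = 0;

    walk_memory_regions(&total, demo_count_exec);
    return total;
}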
2316
pbrook53a59602006-03-25 19:31:22 +00002317int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002318{
bellard9fa3e852004-01-04 18:06:42 +00002319 PageDesc *p;
2320
2321 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002322 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002323 return 0;
2324 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002325}
2326
Richard Henderson376a7902010-03-10 15:57:04 -08002327/* Modify the flags of a page and invalidate the code if necessary.
2328 The flag PAGE_WRITE_ORG is positioned automatically depending
2329 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002330void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002331{
Richard Henderson376a7902010-03-10 15:57:04 -08002332 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002333
Richard Henderson376a7902010-03-10 15:57:04 -08002334 /* This function should never be called with addresses outside the
2335 guest address space. If this assert fires, it probably indicates
2336 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002337#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2338 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002339#endif
2340 assert(start < end);
2341
bellard9fa3e852004-01-04 18:06:42 +00002342 start = start & TARGET_PAGE_MASK;
2343 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002344
2345 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002346 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002347 }
2348
2349 for (addr = start, len = end - start;
2350 len != 0;
2351 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2352 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2353
2354 /* If the write protection bit is set, then we invalidate
2355 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002356 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002357 (flags & PAGE_WRITE) &&
2358 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002359 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002360 }
2361 p->flags = flags;
2362 }
bellard9fa3e852004-01-04 18:06:42 +00002363}
2364
ths3d97b402007-11-02 19:02:07 +00002365int page_check_range(target_ulong start, target_ulong len, int flags)
2366{
2367 PageDesc *p;
2368 target_ulong end;
2369 target_ulong addr;
2370
Richard Henderson376a7902010-03-10 15:57:04 -08002371 /* This function should never be called with addresses outside the
2372 guest address space. If this assert fires, it probably indicates
2373 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002374#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2375 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002376#endif
2377
Richard Henderson3e0650a2010-03-29 10:54:42 -07002378 if (len == 0) {
2379 return 0;
2380 }
Richard Henderson376a7902010-03-10 15:57:04 -08002381 if (start + len - 1 < start) {
2382 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002383 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002384 }
balrog55f280c2008-10-28 10:24:11 +00002385
ths3d97b402007-11-02 19:02:07 +00002386    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2387 start = start & TARGET_PAGE_MASK;
2388
Richard Henderson376a7902010-03-10 15:57:04 -08002389 for (addr = start, len = end - start;
2390 len != 0;
2391 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002392 p = page_find(addr >> TARGET_PAGE_BITS);
 2393        if (!p)
2394 return -1;
 2395        if (!(p->flags & PAGE_VALID))
2396 return -1;
2397
bellarddae32702007-11-14 10:51:00 +00002398 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002399 return -1;
bellarddae32702007-11-14 10:51:00 +00002400 if (flags & PAGE_WRITE) {
2401 if (!(p->flags & PAGE_WRITE_ORG))
2402 return -1;
2403 /* unprotect the page if it was put read-only because it
2404 contains translated code */
2405 if (!(p->flags & PAGE_WRITE)) {
2406 if (!page_unprotect(addr, 0, NULL))
2407 return -1;
2408 }
2409 return 0;
2410 }
ths3d97b402007-11-02 19:02:07 +00002411 }
2412 return 0;
2413}
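
/* Hedged usage sketch: a guest access helper might validate a range
   before touching it.  page_check_range() returns 0 on success and -1
   on any hole or permission mismatch; for PAGE_WRITE requests it also
   calls page_unprotect() on pages made read-only only because they
   contain translated code.  demo_can_write_guest() is an illustrative
   name. */
static int demo_can_write_guest(target_ulong addr, target_ulong len)
{
    return page_check_range(addr, len, PAGE_WRITE) == 0;
}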
2414
bellard9fa3e852004-01-04 18:06:42 +00002415/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002416 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002417int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002418{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002419 unsigned int prot;
2420 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002421 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002422
pbrookc8a706f2008-06-02 16:16:42 +00002423 /* Technically this isn't safe inside a signal handler. However we
2424 know this only ever happens in a synchronous SEGV handler, so in
2425 practice it seems to be ok. */
2426 mmap_lock();
2427
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002428 p = page_find(address >> TARGET_PAGE_BITS);
2429 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002430 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002431 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002432 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002433
bellard9fa3e852004-01-04 18:06:42 +00002434 /* if the page was really writable, then we change its
2435 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002436 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2437 host_start = address & qemu_host_page_mask;
2438 host_end = host_start + qemu_host_page_size;
2439
2440 prot = 0;
2441 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2442 p = page_find(addr >> TARGET_PAGE_BITS);
2443 p->flags |= PAGE_WRITE;
2444 prot |= p->flags;
2445
bellard9fa3e852004-01-04 18:06:42 +00002446 /* and since the content will be modified, we must invalidate
2447 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002448 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002449#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002450 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002451#endif
bellard9fa3e852004-01-04 18:06:42 +00002452 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002453 mprotect((void *)g2h(host_start), qemu_host_page_size,
2454 prot & PAGE_BITS);
2455
2456 mmap_unlock();
2457 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002458 }
pbrookc8a706f2008-06-02 16:16:42 +00002459 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002460 return 0;
2461}
2462
bellard6a00d602005-11-21 23:25:50 +00002463static inline void tlb_set_dirty(CPUState *env,
2464 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002465{
2466}
bellard9fa3e852004-01-04 18:06:42 +00002467#endif /* defined(CONFIG_USER_ONLY) */
2468
pbrooke2eef172008-06-08 01:09:01 +00002469#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002470
Paul Brookc04b2b72010-03-01 03:31:14 +00002471#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2472typedef struct subpage_t {
2473 target_phys_addr_t base;
Richard Hendersonf6405242010-04-22 16:47:31 -07002474 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2475 ram_addr_t region_offset[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002476} subpage_t;
2477
Anthony Liguoric227f092009-10-01 16:12:16 -05002478static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2479 ram_addr_t memory, ram_addr_t region_offset);
Richard Hendersonf6405242010-04-22 16:47:31 -07002480static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2481 ram_addr_t orig_memory,
2482 ram_addr_t region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002483#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2484 need_subpage) \
2485 do { \
2486 if (addr > start_addr) \
2487 start_addr2 = 0; \
2488 else { \
2489 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2490 if (start_addr2 > 0) \
2491 need_subpage = 1; \
2492 } \
2493 \
blueswir149e9fba2007-05-30 17:25:06 +00002494 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002495 end_addr2 = TARGET_PAGE_SIZE - 1; \
2496 else { \
2497 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2498 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2499 need_subpage = 1; \
2500 } \
2501 } while (0)
2502
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002503/* register physical memory.
2504 For RAM, 'size' must be a multiple of the target page size.
2505 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002506 io memory page. The address used when calling the IO function is
2507 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002508 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002509 before calculating this offset. This should not be a problem unless
2510 the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    assert(size);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                p->region_offset = region_offset;
                if (is_ram_rom_romd(phys_offset))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if (is_ram_rom_romd(phys_offset)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

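/* Illustrative sketch (not part of the build): a board model mapping
   64 KiB of previously allocated RAM at guest physical address
   0x100000 through the cpu_register_physical_memory() wrapper, which
   forwards here with region_offset == 0 and log_dirty == false.  The
   `ram_offset` and `mr` values are assumed to come from the caller:

       ram_addr_t ram_offset = qemu_ram_alloc(0x10000, mr);
       cpu_register_physical_memory(0x100000, 0x10000,
                                    ram_offset | IO_MEM_RAM);
 */
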
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

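/* Worked example: on a host where /dev/hugepages is a 2 MiB hugetlbfs
   mount, gethugepagesize("/dev/hugepages") returns 2097152.  For a path
   on any other filesystem it prints the warning above and returns that
   filesystem's block size instead. */
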
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

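/* Usage note (illustrative): file_ram_alloc() is only reached when the
   user requests hugepage-backed guest RAM, e.g.

       qemu-system-x86_64 -m 1024 -mem-path /dev/hugepages

   The 1024 MiB allocation is then rounded up to a multiple of the
   hugepage size reported by gethugepagesize() above. */
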
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

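/* Worked example (hypothetical block layout): with existing blocks at
   [0x0, 0x100000) and [0x300000, 0x400000), find_ram_offset(0x100000)
   considers the gap [0x100000, 0x300000), keeps it as the smallest gap
   that still fits, and returns 0x100000. */
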
static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}

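/* Illustrative call (names are assumptions): a display device labelling
   its block so migration can match it up on the destination:

       ram_addr_t vram_offset = qemu_ram_alloc(vram_size, &s->vram);
       qemu_ram_set_idstr(vram_offset, "vga.vram", dev);

   If `dev` sits on a bus that provides get_dev_path(), the resulting
   idstr might look like "0000:00:02.0/vga.vram". */
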
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB.  Larger systems
               have larger values.  We put the guest between the end of the
               data segment (system break) and this value.  We use 32GB as a
               base to have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

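/* Sketch of the two allocation paths (caller-side, assumed usage):

       ram_addr_t a = qemu_ram_alloc(size, mr);              // QEMU allocates
       ram_addr_t b = qemu_ram_alloc_from_ptr(size, p, mr);  // caller-owned

   Both return an offset into the global RAM space chosen by
   find_ram_offset(); the second marks the block RAM_PREALLOC_MASK so
   qemu_ram_free() will not try to unmap the caller's memory. */
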
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr, but without reordering the ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram.  Similar to qemu_get_ram_ptr
 * but takes a size argument. */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

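/* Sketch (assumed usage): mapping a bounded guest-physical range, where
   the callee may shrink *size to the part that is actually contiguous:

       ram_addr_t len = 4096;
       void *p = qemu_ram_ptr_length(addr, &len);
       // p is valid for at most len bytes after the call
 */
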
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case can happen when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

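/* Round-trip sketch: for any offset inside a mapped block (non-Xen
   case), translating the host pointer back recovers the offset:

       void *p = qemu_get_ram_ptr(offset);
       assert(qemu_ram_addr_from_host_nofail(p) == offset);
 */
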
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

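/* Mask sketch: the accessors below pass len_mask = ~(N - 1) for an
   N-byte access, e.g. ~0x3 for the 32-bit ones.  The two comparisons
   above then flag a hit when either the watchpoint start lies inside
   the aligned access or the access address lies inside the aligned
   watchpoint range. */
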
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};

static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read(idx, addr, 1 << len);
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write(idx, addr, value, 1 << len);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static uint32_t subpage_ram_readb(void *opaque, target_phys_addr_t addr)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    return ldub_p(ptr);
}

static void subpage_ram_writeb(void *opaque, target_phys_addr_t addr,
                               uint32_t value)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    stb_p(ptr, value);
}

static uint32_t subpage_ram_readw(void *opaque, target_phys_addr_t addr)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    return lduw_p(ptr);
}

static void subpage_ram_writew(void *opaque, target_phys_addr_t addr,
                               uint32_t value)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    stw_p(ptr, value);
}

static uint32_t subpage_ram_readl(void *opaque, target_phys_addr_t addr)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    return ldl_p(ptr);
}

static void subpage_ram_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t value)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    stl_p(ptr, value);
}

static CPUReadMemoryFunc * const subpage_ram_read[] = {
    &subpage_ram_readb,
    &subpage_ram_readw,
    &subpage_ram_readl,
};

static CPUWriteMemoryFunc * const subpage_ram_write[] = {
    &subpage_ram_writeb,
    &subpage_ram_writew,
    &subpage_ram_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        memory = IO_MEM_SUBPAGE_RAM;
    }
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}

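/* Index sketch: SUBPAGE_IDX() keeps the offset within the page, so on a
   4 KiB target page, subpage_register(mmio, 0x100, 0x2ff, mem, off)
   fills sub_io_index[0x100..0x2ff] and routes only that byte range of
   the page to `mem`. */
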
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}

static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
    return -1;
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2).  Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified.  If it is zero, a new io zone is allocated.  The return
   value can be used with cpu_register_physical_memory().  (-1) is
   returned on error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        _io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        _io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    return (io_index << IO_MEM_SHIFT);
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}

void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i = 0; i < 3; i++) {
        _io_mem_read[io_index][i] = unassigned_mem_read[i];
        _io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read,
                                 subpage_ram_write, NULL);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

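/* Sketch (assumed caller-side use of the MemoryRegion API): board code
   hangs its regions off the root returned here, e.g.

       MemoryRegion *sysmem = get_system_memory();
       memory_region_add_subregion(sysmem, 0x100000, my_region);

   where my_region is a MemoryRegion the caller already initialized. */
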
#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

bellard13eb76e2004-01-24 15:23:36 +00003681#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003682void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003683 int len, int is_write)
3684{
3685 int l, io_index;
3686 uint8_t *ptr;
3687 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003688 target_phys_addr_t page;
Anthony PERARD8ca56922011-07-15 04:32:53 +00003689 ram_addr_t pd;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003690 PhysPageDesc p;
ths3b46e622007-09-17 08:09:54 +00003691
bellard13eb76e2004-01-24 15:23:36 +00003692 while (len > 0) {
3693 page = addr & TARGET_PAGE_MASK;
3694 l = (page + TARGET_PAGE_SIZE) - addr;
3695 if (l > len)
3696 l = len;
bellard92e873b2004-05-21 14:52:29 +00003697 p = phys_page_find(page >> TARGET_PAGE_BITS);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003698 pd = p.phys_offset;
ths3b46e622007-09-17 08:09:54 +00003699
bellard13eb76e2004-01-24 15:23:36 +00003700 if (is_write) {
bellard3a7d9292005-08-21 09:26:42 +00003701 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003702 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003703 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003704 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
bellard6a00d602005-11-21 23:25:50 +00003705 /* XXX: could force cpu_single_env to NULL to avoid
3706 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003707 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003708 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003709 val = ldl_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003710 io_mem_write(io_index, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003711 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003712 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003713 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003714 val = lduw_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003715 io_mem_write(io_index, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003716 l = 2;
3717 } else {
bellard1c213d12005-09-03 10:49:04 +00003718 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003719 val = ldub_p(buf);
Avi Kivityacbbec52011-11-21 12:27:03 +02003720 io_mem_write(io_index, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003721 l = 1;
3722 }
3723 } else {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003724 ram_addr_t addr1;
bellardb448f2f2004-02-25 23:24:04 +00003725 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003726 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003727 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003728 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003729 if (!cpu_physical_memory_is_dirty(addr1)) {
3730 /* invalidate code */
3731 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3732 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003733 cpu_physical_memory_set_dirty_flags(
3734 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003735 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003736 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003737 }
3738 } else {
Avi Kivity1d393fa2012-01-01 21:15:42 +02003739 if (!is_ram_rom_romd(pd)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003740 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003741 /* I/O case */
3742 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003743 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
aurel326c2934d2009-02-18 21:37:17 +00003744 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003745 /* 32 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003746 val = io_mem_read(io_index, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003747 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003748 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003749 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003750 /* 16 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003751 val = io_mem_read(io_index, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003752 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003753 l = 2;
3754 } else {
bellard1c213d12005-09-03 10:49:04 +00003755 /* 8 bit read access */
Avi Kivityacbbec52011-11-21 12:27:03 +02003756 val = io_mem_read(io_index, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003757 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003758 l = 1;
3759 }
3760 } else {
3761 /* RAM case */
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003762 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3763 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3764 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003765 }
3766 }
3767 len -= l;
3768 buf += l;
3769 addr += l;
3770 }
3771}
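
/* Example (illustrative sketch, not part of the original file): device
 * models normally go through the cpu_physical_memory_read()/write()
 * helpers, which wrap the dispatcher above:
 *
 *     uint8_t data[4] = { 0x12, 0x34, 0x56, 0x78 };
 *     cpu_physical_memory_write(0x1000, data, sizeof(data));
 *     cpu_physical_memory_read(0x1000, data, sizeof(data));
 *
 * The transfer is split at page granularity: RAM-backed pages are
 * memcpy'd, while MMIO pages are broken into 4/2/1-byte io_mem accesses
 * as above.
 */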

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if (!is_ram_rom_romd(pd)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
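
/* Example (illustrative sketch, not part of the original file): firmware
 * loaders use this entry point to fill regions that the ordinary
 * cpu_physical_memory_rw() path would skip because they are ROM:
 *
 *     cpu_physical_memory_write_rom(bios_base, bios_data, bios_size);
 *
 * bios_base, bios_data and bios_size are hypothetical names standing in
 * for an image already loaded into host memory.
 */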

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
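
/* Example (illustrative sketch, not part of the original file): a caller
 * that loses the race for the single bounce buffer can ask to be called
 * back once cpu_physical_memory_unmap() releases it:
 *
 *     static void retry_dma(void *opaque)        // hypothetical callback
 *     {
 *         MyDMAState *s = opaque;                // hypothetical state
 *         start_my_dma(s);                       // retry the mapping
 *     }
 *
 *     cpu_register_map_client(s, retry_dma);
 *
 * cpu_notify_map_clients() above runs each callback once and unregisters
 * it, so a client that fails again must re-register itself.
 */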

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
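
/* Example (illustrative sketch, not part of the original file): a
 * zero-copy DMA write pairs the two calls above and copes with a partial
 * mapping; gpa, size and data are hypothetical:
 *
 *     target_phys_addr_t plen = size;
 *     void *host = cpu_physical_memory_map(gpa, &plen, 1);
 *     if (host) {
 *         memcpy(host, data, plen);              // plen may be < size
 *         cpu_physical_memory_unmap(host, plen, 1, plen);
 *     }
 */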

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
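
/* Example (illustrative sketch, not part of the original file): picking
 * the accessor that matches the device's endianness avoids hand-rolled
 * byte swapping and stays correct on both host endiannesses:
 *
 *     uint32_t flags = ldl_le_phys(desc);        // little-endian field
 *     uint32_t magic = ldl_be_phys(desc + 4);    // big-endian field
 *
 * desc is a hypothetical guest physical address of a descriptor.
 */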

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(io_index, addr, 4) << 32;
        val |= io_mem_read(io_index, addr + 4, 4);
#else
        val = io_mem_read(io_index, addr, 4);
        val |= io_mem_read(io_index, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
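
/* Example (illustrative sketch, not part of the original file): a target
 * MMU walker that sets accessed/dirty bits in a guest PTE can use the
 * _notdirty store so the bookkeeping write does not itself trip the dirty
 * tracking; PG_ACCESSED_MASK is the x86 bit name, other targets differ:
 *
 *     uint32_t pte = ldl_phys(pte_addr);         // pte_addr: hypothetical
 *     stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
 */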

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(io_index, addr, val >> 32, 4);
        io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(io_index, addr, (uint32_t)val, 4);
        io_mem_write(io_index, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(io_index, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
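
/* Example (illustrative sketch, not part of the original file): debuggers
 * such as the gdb stub read guest memory through this per-page translating
 * path, which also tolerates writes into ROM:
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(env, vaddr, insn, sizeof(insn), 0) < 0) {
 *         // vaddr (hypothetical) had no physical mapping
 *     }
 */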
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
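
/* Note (not part of the original file): this dump backs the monitor's
 * "info jit" command, assuming the usual wiring of dump_exec_info() into
 * the human monitor.
 */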
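
/* Each inclusion below expands softmmu_template.h once per access size
 * (SHIFT 0/1/2/3, i.e. 1/2/4/8 bytes), generating the _cmmu code-access
 * load helpers the translators use to fetch guest instructions; GETPC()
 * is stubbed out because these helpers are not called from generated
 * code. (Descriptive note, not part of the original file.)
 */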
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif