/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
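
/* For example, with L1_MAP_ADDR_SPACE_BITS == 64 and TARGET_PAGE_BITS == 12,
   52 bits of page index remain; 52 % 10 == 2 is below the "silly small"
   threshold, so V_L1_BITS == 12 and V_L1_SHIFT == 40: a 4096-entry top
   level followed by four 10-bit levels resolves the full index. */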

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

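/* Look up the PageDesc for page number 'index' (an address shifted right
   by TARGET_PAGE_BITS) in the multi-level l1_map.  Each intermediate
   level consumes L2_BITS of the index.  If 'alloc' is zero, return NULL
   as soon as a level is missing instead of allocating it. */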
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change once a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

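/* Allocate the buffer that translated code is emitted into.  With
   USE_STATIC_CODE_GEN_BUFFER it is a static array made executable via
   map_exec(); otherwise it is mmap()ed, with per-host placement
   constraints so that generated code can reach its targets with direct
   calls and branches. */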
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

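/* TBs are linked into per-page lists through tb->page_next[]; the low
   two bits of each link encode which of the TB's (at most two) pages
   the link belongs to, hence the "& 3" / "& ~3" masking below. */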
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

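/* The jmp_next[]/jmp_first fields form, per TB, a circular list of all
   TBs whose generated code jumps directly into that TB; a tag of 2 in
   the low pointer bits marks the list head. */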
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

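/* Remove 'tb' from every data structure that references it: the
   physical hash table, the per-page TB lists, each CPU's tb_jmp_cache,
   and the jump chains of any TB that was directly linked to it. */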
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

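/* Set bits [start, start + len) in the bitmap 'tab'. */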
914static inline void set_bits(uint8_t *tab, int start, int len)
915{
916 int end, mask, end1;
917
918 end = start + len;
919 tab += start >> 3;
920 mask = 0xff << (start & 7);
921 if ((start & ~7) == (end & ~7)) {
922 if (start < end) {
923 mask &= ~(0xff << (end & 7));
924 *tab |= mask;
925 }
926 } else {
927 *tab++ |= mask;
928 start = (start + 8) & ~7;
929 end1 = end & ~7;
930 while (start < end1) {
931 *tab++ = 0xff;
932 start += 8;
933 }
934 if (start < end) {
935 mask = ~(0xff << (end & 7));
936 *tab |= mask;
937 }
938 }
939}
940
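/* Build a bitmap marking which bytes of the page are covered by
   translated code, so that tb_invalidate_phys_page_fast() can cheaply
   decide whether a write actually hits a TB. */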
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

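/* Translate a new TB for (pc, cs_base, flags).  If the code buffer is
   full, flush it and retry: the second tb_alloc() cannot fail because
   the buffer has just been emptied. */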
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end). NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

1205/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001206static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001207 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001208{
1209 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001210#ifndef CONFIG_USER_ONLY
1211 bool page_already_protected;
1212#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001213
bellard9fa3e852004-01-04 18:06:42 +00001214 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001215 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001216 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001217#ifndef CONFIG_USER_ONLY
1218 page_already_protected = p->first_tb != NULL;
1219#endif
bellard9fa3e852004-01-04 18:06:42 +00001220 p->first_tb = (TranslationBlock *)((long)tb | n);
1221 invalidate_page_bitmap(p);
1222
bellard107db442004-06-22 18:48:46 +00001223#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001224
bellard9fa3e852004-01-04 18:06:42 +00001225#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001226 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001227 target_ulong addr;
1228 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001229 int prot;
1230
bellardfd6ce8f2003-05-14 19:00:11 +00001231 /* force the host page as non writable (writes will have a
1232 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001233 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001234 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001235 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1236 addr += TARGET_PAGE_SIZE) {
1237
1238 p2 = page_find (addr >> TARGET_PAGE_BITS);
1239 if (!p2)
1240 continue;
1241 prot |= p2->flags;
1242 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001243 }
ths5fafdf22007-09-16 21:08:06 +00001244 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001245 (prot & PAGE_BITS) & ~PAGE_WRITE);
1246#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001247 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001248 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001249#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001250 }
bellard9fa3e852004-01-04 18:06:42 +00001251#else
1252 /* if some code is already present, then the pages are already
1253 protected. So we handle the case where only the first TB is
1254 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001255 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001256 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001257 }
1258#endif
bellardd720b932004-04-25 17:57:43 +00001259
1260#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001261}
1262
bellard9fa3e852004-01-04 18:06:42 +00001263/* add a new TB and link it to the physical page tables. phys_page2 is
1264 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001265void tb_link_page(TranslationBlock *tb,
1266 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001267{
bellard9fa3e852004-01-04 18:06:42 +00001268 unsigned int h;
1269 TranslationBlock **ptb;
1270
pbrookc8a706f2008-06-02 16:16:42 +00001271 /* Grab the mmap lock to stop another thread invalidating this TB
1272 before we are done. */
1273 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001274 /* add in the physical hash table */
1275 h = tb_phys_hash_func(phys_pc);
1276 ptb = &tb_phys_hash[h];
1277 tb->phys_hash_next = *ptb;
1278 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001279
1280 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001281 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1282 if (phys_page2 != -1)
1283 tb_alloc_page(tb, 1, phys_page2);
1284 else
1285 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001286
bellardd4e81642003-05-25 16:46:15 +00001287 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1288 tb->jmp_next[0] = NULL;
1289 tb->jmp_next[1] = NULL;
1290
1291 /* init original jump addresses */
1292 if (tb->tb_next_offset[0] != 0xffff)
1293 tb_reset_jump(tb, 0);
1294 if (tb->tb_next_offset[1] != 0xffff)
1295 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001296
1297#ifdef DEBUG_TB_CHECK
1298 tb_page_check();
1299#endif
pbrookc8a706f2008-06-02 16:16:42 +00001300 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001301}
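/* Illustrative sketch (editorial, not part of the original source): how a
   caller such as tb_gen_code derives phys_page2 before linking.  A block
   whose guest code crosses a page boundary must be registered on both
   physical pages so that invalidating either page kills the TB.  The exact
   caller is paraphrased here, not quoted. */
#if 0
{
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;

    phys_pc = get_page_addr_code(env, pc);
    phys_page2 = -1;
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* the block spills past its first page: register the second one too */
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
}
#endif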
1302
bellarda513fe12003-05-27 23:29:48 +00001303/* find the TB containing tc_ptr, i.e. the TB such that tb->tc_ptr <=
1304 tc_ptr < (tb + 1)->tc_ptr. Return NULL if not found */
1305TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1306{
1307 int m_min, m_max, m;
1308 unsigned long v;
1309 TranslationBlock *tb;
1310
1311 if (nb_tbs <= 0)
1312 return NULL;
1313 if (tc_ptr < (unsigned long)code_gen_buffer ||
1314 tc_ptr >= (unsigned long)code_gen_ptr)
1315 return NULL;
1316 /* binary search (cf Knuth) */
1317 m_min = 0;
1318 m_max = nb_tbs - 1;
1319 while (m_min <= m_max) {
1320 m = (m_min + m_max) >> 1;
1321 tb = &tbs[m];
1322 v = (unsigned long)tb->tc_ptr;
1323 if (v == tc_ptr)
1324 return tb;
1325 else if (tc_ptr < v) {
1326 m_max = m - 1;
1327 } else {
1328 m_min = m + 1;
1329 }
ths5fafdf22007-09-16 21:08:06 +00001330 }
bellarda513fe12003-05-27 23:29:48 +00001331 return &tbs[m_max];
1332}
bellard75012672003-06-21 13:11:07 +00001333
bellardea041c02003-06-25 16:16:50 +00001334static void tb_reset_jump_recursive(TranslationBlock *tb);
1335
1336static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1337{
1338 TranslationBlock *tb1, *tb_next, **ptb;
1339 unsigned int n1;
1340
1341 tb1 = tb->jmp_next[n];
1342 if (tb1 != NULL) {
1343 /* find head of list */
1344 for(;;) {
1345 n1 = (long)tb1 & 3;
1346 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1347 if (n1 == 2)
1348 break;
1349 tb1 = tb1->jmp_next[n1];
1350 }
1351 /* we are now sure that tb jumps to tb1 */
1352 tb_next = tb1;
1353
1354 /* remove tb from the jmp_first list */
1355 ptb = &tb_next->jmp_first;
1356 for(;;) {
1357 tb1 = *ptb;
1358 n1 = (long)tb1 & 3;
1359 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1360 if (n1 == n && tb1 == tb)
1361 break;
1362 ptb = &tb1->jmp_next[n1];
1363 }
1364 *ptb = tb->jmp_next[n];
1365 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001366
bellardea041c02003-06-25 16:16:50 +00001367 /* suppress the jump to next tb in generated code */
1368 tb_reset_jump(tb, n);
1369
bellard01243112004-01-04 15:48:17 +00001370 /* suppress jumps in the tb we could have jumped to */
bellardea041c02003-06-25 16:16:50 +00001371 tb_reset_jump_recursive(tb_next);
1372 }
1373}
1374
1375static void tb_reset_jump_recursive(TranslationBlock *tb)
1376{
1377 tb_reset_jump_recursive2(tb, 0);
1378 tb_reset_jump_recursive2(tb, 1);
1379}
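/* Illustrative sketch (editorial): the jump lists walked above store a
   tagged pointer, with the jump slot index packed into the two low bits
   (0 or 1 for the outgoing jump slots, 2 marking the list head).
   Decoding an entry therefore looks like this: */
#if 0
{
    TranslationBlock *entry = tb->jmp_first;          /* tagged pointer */
    unsigned int slot = (long)entry & 3;              /* 0, 1, or 2 = head */
    TranslationBlock *target =
        (TranslationBlock *)((long)entry & ~3);       /* real pointer */
}
#endif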
1380
bellard1fddef42005-04-17 19:16:13 +00001381#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001382#if defined(CONFIG_USER_ONLY)
1383static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1384{
1385 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1386}
1387#else
bellardd720b932004-04-25 17:57:43 +00001388static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1389{
Anthony Liguoric227f092009-10-01 16:12:16 -05001390 target_phys_addr_t addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00001391 target_ulong pd;
Anthony Liguoric227f092009-10-01 16:12:16 -05001392 ram_addr_t ram_addr;
pbrookc2f07f82006-04-08 17:14:56 +00001393 PhysPageDesc *p;
bellardd720b932004-04-25 17:57:43 +00001394
pbrookc2f07f82006-04-08 17:14:56 +00001395 addr = cpu_get_phys_page_debug(env, pc);
1396 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1397 if (!p) {
1398 pd = IO_MEM_UNASSIGNED;
1399 } else {
1400 pd = p->phys_offset;
1401 }
1402 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001403 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001404}
bellardc27004e2005-01-03 23:35:10 +00001405#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001406#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001407
Paul Brookc527ee82010-03-01 03:31:14 +00001408#if defined(CONFIG_USER_ONLY)
1409void cpu_watchpoint_remove_all(CPUState *env, int mask)
1410
1411{
1412}
1413
1414int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1415 int flags, CPUWatchpoint **watchpoint)
1416{
1417 return -ENOSYS;
1418}
1419#else
pbrook6658ffb2007-03-16 23:58:11 +00001420/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001421int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1422 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001423{
aliguorib4051332008-11-18 20:14:20 +00001424 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001425 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001426
aliguorib4051332008-11-18 20:14:20 +00001427 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1428 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1429 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1430 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1431 return -EINVAL;
1432 }
aliguoria1d1bb32008-11-18 20:07:32 +00001433 wp = qemu_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001434
aliguoria1d1bb32008-11-18 20:07:32 +00001435 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001436 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001437 wp->flags = flags;
1438
aliguori2dc9f412008-11-18 20:56:59 +00001439 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001440 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001441 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001442 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001443 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001444
pbrook6658ffb2007-03-16 23:58:11 +00001445 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001446
1447 if (watchpoint)
1448 *watchpoint = wp;
1449 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001450}
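/* Illustrative sketch (editorial): inserting a 4-byte write watchpoint the
   way a debugger front end might.  Per the sanity check above, len must be
   1, 2, 4 or 8 and addr must be aligned to it. */
#if 0
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp) < 0) {
        /* -EINVAL: unaligned address or unsupported length */
    }
}
#endif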
1451
aliguoria1d1bb32008-11-18 20:07:32 +00001452/* Remove a specific watchpoint. */
1453int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1454 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001455{
aliguorib4051332008-11-18 20:14:20 +00001456 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001457 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001458
Blue Swirl72cf2d42009-09-12 07:36:22 +00001459 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001460 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001461 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001462 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001463 return 0;
1464 }
1465 }
aliguoria1d1bb32008-11-18 20:07:32 +00001466 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001467}
1468
aliguoria1d1bb32008-11-18 20:07:32 +00001469/* Remove a specific watchpoint by reference. */
1470void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1471{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001472 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001473
aliguoria1d1bb32008-11-18 20:07:32 +00001474 tlb_flush_page(env, watchpoint->vaddr);
1475
1476 qemu_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001477}
1478
aliguoria1d1bb32008-11-18 20:07:32 +00001479/* Remove all matching watchpoints. */
1480void cpu_watchpoint_remove_all(CPUState *env, int mask)
1481{
aliguoric0ce9982008-11-25 22:13:57 +00001482 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001483
Blue Swirl72cf2d42009-09-12 07:36:22 +00001484 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001485 if (wp->flags & mask)
1486 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001487 }
aliguoria1d1bb32008-11-18 20:07:32 +00001488}
Paul Brookc527ee82010-03-01 03:31:14 +00001489#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001490
1491/* Add a breakpoint. */
1492int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1493 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001494{
bellard1fddef42005-04-17 19:16:13 +00001495#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001496 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001497
aliguoria1d1bb32008-11-18 20:07:32 +00001498 bp = qemu_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001499
1500 bp->pc = pc;
1501 bp->flags = flags;
1502
aliguori2dc9f412008-11-18 20:56:59 +00001503 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001504 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001505 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001506 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001507 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001508
1509 breakpoint_invalidate(env, pc);
1510
1511 if (breakpoint)
1512 *breakpoint = bp;
1513 return 0;
1514#else
1515 return -ENOSYS;
1516#endif
1517}
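/* Illustrative sketch (editorial): a gdbstub-style caller inserting a
   breakpoint and later removing it by reference rather than by pc/flags. */
#if 0
{
    CPUBreakpoint *bp;

    if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
        /* ... run the debug session ... */
        cpu_breakpoint_remove_by_ref(env, bp);
    }
}
#endif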
1518
1519/* Remove a specific breakpoint. */
1520int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1521{
1522#if defined(TARGET_HAS_ICE)
1523 CPUBreakpoint *bp;
1524
Blue Swirl72cf2d42009-09-12 07:36:22 +00001525 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001526 if (bp->pc == pc && bp->flags == flags) {
1527 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001528 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001529 }
bellard4c3a88a2003-07-26 12:06:08 +00001530 }
aliguoria1d1bb32008-11-18 20:07:32 +00001531 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001532#else
aliguoria1d1bb32008-11-18 20:07:32 +00001533 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001534#endif
1535}
1536
aliguoria1d1bb32008-11-18 20:07:32 +00001537/* Remove a specific breakpoint by reference. */
1538void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001539{
bellard1fddef42005-04-17 19:16:13 +00001540#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001541 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001542
aliguoria1d1bb32008-11-18 20:07:32 +00001543 breakpoint_invalidate(env, breakpoint->pc);
1544
1545 qemu_free(breakpoint);
1546#endif
1547}
1548
1549/* Remove all matching breakpoints. */
1550void cpu_breakpoint_remove_all(CPUState *env, int mask)
1551{
1552#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001553 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001554
Blue Swirl72cf2d42009-09-12 07:36:22 +00001555 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001556 if (bp->flags & mask)
1557 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001558 }
bellard4c3a88a2003-07-26 12:06:08 +00001559#endif
1560}
1561
bellardc33a3462003-07-29 20:50:33 +00001562/* enable or disable single step mode. EXCP_DEBUG is returned by the
1563 CPU loop after each instruction */
1564void cpu_single_step(CPUState *env, int enabled)
1565{
bellard1fddef42005-04-17 19:16:13 +00001566#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001567 if (env->singlestep_enabled != enabled) {
1568 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001569 if (kvm_enabled())
1570 kvm_update_guest_debug(env, 0);
1571 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001572 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001573 /* XXX: only flush what is necessary */
1574 tb_flush(env);
1575 }
bellardc33a3462003-07-29 20:50:33 +00001576 }
1577#endif
1578}
1579
bellard34865132003-10-05 14:28:56 +00001580/* enable or disable low-level logging */
1581void cpu_set_log(int log_flags)
1582{
1583 loglevel = log_flags;
1584 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001585 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001586 if (!logfile) {
1587 perror(logfilename);
1588 _exit(1);
1589 }
bellard9fa3e852004-01-04 18:06:42 +00001590#if !defined(CONFIG_SOFTMMU)
1591 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1592 {
blueswir1b55266b2008-09-20 08:07:15 +00001593 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001594 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1595 }
Filip Navarabf65f532009-07-27 10:02:04 -05001596#elif !defined(_WIN32)
1597 /* Win32 doesn't support line-buffering and requires size >= 2 */
bellard34865132003-10-05 14:28:56 +00001598 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001599#endif
pbrooke735b912007-06-30 13:53:24 +00001600 log_append = 1;
1601 }
1602 if (!loglevel && logfile) {
1603 fclose(logfile);
1604 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001605 }
1606}
1607
1608void cpu_set_log_filename(const char *filename)
1609{
1610 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001611 if (logfile) {
1612 fclose(logfile);
1613 logfile = NULL;
1614 }
1615 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001616}
bellardc33a3462003-07-29 20:50:33 +00001617
aurel323098dba2009-03-07 21:28:24 +00001618static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001619{
pbrookd5975362008-06-07 20:50:51 +00001620 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1621 problem and hope the cpu will stop of its own accord. For userspace
1622 emulation this often isn't actually as bad as it sounds. Often
1623 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001624 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001625 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001626
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001627 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001628 tb = env->current_tb;
1629 /* if the cpu is currently executing code, we must unlink it and
1630 all the potentially executing TBs */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001631 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001632 env->current_tb = NULL;
1633 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001634 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001635 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001636}
1637
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001638#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001639/* mask must never be zero, except for the A20 change call */
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001640static void tcg_handle_interrupt(CPUState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001641{
1642 int old_mask;
1643
1644 old_mask = env->interrupt_request;
1645 env->interrupt_request |= mask;
1646
aliguori8edac962009-04-24 18:03:45 +00001647 /*
1648 * If called from iothread context, wake the target cpu in
1649 * case it's halted.
1650 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001651 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001652 qemu_cpu_kick(env);
1653 return;
1654 }
aliguori8edac962009-04-24 18:03:45 +00001655
pbrook2e70f6e2008-06-29 01:03:05 +00001656 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001657 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001658 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001659 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001660 cpu_abort(env, "Raised interrupt while not in I/O function");
1661 }
pbrook2e70f6e2008-06-29 01:03:05 +00001662 } else {
aurel323098dba2009-03-07 21:28:24 +00001663 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001664 }
1665}
1666
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001667CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1668
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001669#else /* CONFIG_USER_ONLY */
1670
1671void cpu_interrupt(CPUState *env, int mask)
1672{
1673 env->interrupt_request |= mask;
1674 cpu_unlink_tb(env);
1675}
1676#endif /* CONFIG_USER_ONLY */
1677
bellardb54ad042004-05-20 13:42:52 +00001678void cpu_reset_interrupt(CPUState *env, int mask)
1679{
1680 env->interrupt_request &= ~mask;
1681}
1682
aurel323098dba2009-03-07 21:28:24 +00001683void cpu_exit(CPUState *env)
1684{
1685 env->exit_request = 1;
1686 cpu_unlink_tb(env);
1687}
1688
blueswir1c7cd6a32008-10-02 18:27:46 +00001689const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001690 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001691 "show generated host assembly code for each compiled TB" },
1692 { CPU_LOG_TB_IN_ASM, "in_asm",
1693 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001694 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001695 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001696 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001697 "show micro ops "
1698#ifdef TARGET_I386
1699 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001700#endif
blueswir1e01a1152008-03-14 17:37:11 +00001701 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001702 { CPU_LOG_INT, "int",
1703 "show interrupts/exceptions in short format" },
1704 { CPU_LOG_EXEC, "exec",
1705 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001706 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001707 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001708#ifdef TARGET_I386
1709 { CPU_LOG_PCALL, "pcall",
1710 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001711 { CPU_LOG_RESET, "cpu_reset",
1712 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001713#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001714#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001715 { CPU_LOG_IOPORT, "ioport",
1716 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001717#endif
bellardf193c792004-03-21 17:06:25 +00001718 { 0, NULL, NULL },
1719};
1720
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001721#ifndef CONFIG_USER_ONLY
1722static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1723 = QLIST_HEAD_INITIALIZER(memory_client_list);
1724
1725static void cpu_notify_set_memory(target_phys_addr_t start_addr,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001726 ram_addr_t size,
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03001727 ram_addr_t phys_offset,
1728 bool log_dirty)
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001729{
1730 CPUPhysMemoryClient *client;
1731 QLIST_FOREACH(client, &memory_client_list, list) {
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03001732 client->set_memory(client, start_addr, size, phys_offset, log_dirty);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001733 }
1734}
1735
1736static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001737 target_phys_addr_t end)
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001738{
1739 CPUPhysMemoryClient *client;
1740 QLIST_FOREACH(client, &memory_client_list, list) {
1741 int r = client->sync_dirty_bitmap(client, start, end);
1742 if (r < 0)
1743 return r;
1744 }
1745 return 0;
1746}
1747
1748static int cpu_notify_migration_log(int enable)
1749{
1750 CPUPhysMemoryClient *client;
1751 QLIST_FOREACH(client, &memory_client_list, list) {
1752 int r = client->migration_log(client, enable);
1753 if (r < 0)
1754 return r;
1755 }
1756 return 0;
1757}
1758
Alex Williamson2173a752011-05-03 12:36:58 -06001759struct last_map {
1760 target_phys_addr_t start_addr;
1761 ram_addr_t size;
1762 ram_addr_t phys_offset;
1763};
1764
Alex Williamson8d4c78e2011-05-03 12:36:46 -06001765/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
1766 * address. Each intermediate table provides the next L2_BITs of guest
1767 * physical address space. The number of levels varies based on host and
1768 * guest configuration, making it efficient to build the final guest
1769 * physical address by seeding the L1 offset and shifting and adding in
1770 * each L2 offset as we recurse through them. */
Alex Williamson2173a752011-05-03 12:36:58 -06001771static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
1772 void **lp, target_phys_addr_t addr,
1773 struct last_map *map)
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001774{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001775 int i;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001776
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001777 if (*lp == NULL) {
1778 return;
1779 }
1780 if (level == 0) {
1781 PhysPageDesc *pd = *lp;
Alex Williamson8d4c78e2011-05-03 12:36:46 -06001782 addr <<= L2_BITS + TARGET_PAGE_BITS;
Paul Brook7296aba2010-03-14 14:58:46 +00001783 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001784 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
Alex Williamson2173a752011-05-03 12:36:58 -06001785 target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;
1786
1787 if (map->size &&
1788 start_addr == map->start_addr + map->size &&
1789 pd[i].phys_offset == map->phys_offset + map->size) {
1790
1791 map->size += TARGET_PAGE_SIZE;
1792 continue;
1793 } else if (map->size) {
1794 client->set_memory(client, map->start_addr,
1795 map->size, map->phys_offset, false);
1796 }
1797
1798 map->start_addr = start_addr;
1799 map->size = TARGET_PAGE_SIZE;
1800 map->phys_offset = pd[i].phys_offset;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001801 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001802 }
1803 } else {
1804 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001805 for (i = 0; i < L2_SIZE; ++i) {
Alex Williamson8d4c78e2011-05-03 12:36:46 -06001806 phys_page_for_each_1(client, level - 1, pp + i,
Alex Williamson2173a752011-05-03 12:36:58 -06001807 (addr << L2_BITS) | i, map);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001808 }
1809 }
1810}
1811
1812static void phys_page_for_each(CPUPhysMemoryClient *client)
1813{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001814 int i;
Alex Williamson2173a752011-05-03 12:36:58 -06001815 struct last_map map = { };
1816
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001817 for (i = 0; i < P_L1_SIZE; ++i) {
1818 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
Alex Williamson2173a752011-05-03 12:36:58 -06001819 l1_phys_map + i, i, &map);
1820 }
1821 if (map.size) {
1822 client->set_memory(client, map.start_addr, map.size, map.phys_offset,
1823 false);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001824 }
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001825}
1826
1827void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1828{
1829 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1830 phys_page_for_each(client);
1831}
1832
1833void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1834{
1835 QLIST_REMOVE(client, list);
1836}
1837#endif
1838
bellardf193c792004-03-21 17:06:25 +00001839static int cmp1(const char *s1, int n, const char *s2)
1840{
1841 if (strlen(s2) != n)
1842 return 0;
1843 return memcmp(s1, s2, n) == 0;
1844}
ths3b46e622007-09-17 08:09:54 +00001845
bellardf193c792004-03-21 17:06:25 +00001846/* takes a comma separated list of log masks. Return 0 if error. */
1847int cpu_str_to_log_mask(const char *str)
1848{
blueswir1c7cd6a32008-10-02 18:27:46 +00001849 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001850 int mask;
1851 const char *p, *p1;
1852
1853 p = str;
1854 mask = 0;
1855 for(;;) {
1856 p1 = strchr(p, ',');
1857 if (!p1)
1858 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001859 if(cmp1(p,p1-p,"all")) {
1860 for(item = cpu_log_items; item->mask != 0; item++) {
1861 mask |= item->mask;
1862 }
1863 } else {
1864 for(item = cpu_log_items; item->mask != 0; item++) {
1865 if (cmp1(p, p1 - p, item->name))
1866 goto found;
1867 }
1868 return 0;
bellardf193c792004-03-21 17:06:25 +00001869 }
bellardf193c792004-03-21 17:06:25 +00001870 found:
1871 mask |= item->mask;
1872 if (*p1 != ',')
1873 break;
1874 p = p1 + 1;
1875 }
1876 return mask;
1877}
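/* Illustrative sketch (editorial): how a -d style option is typically wired
   up, assuming an option string such as "in_asm,cpu". */
#if 0
{
    int mask = cpu_str_to_log_mask("in_asm,cpu");

    if (!mask) {
        /* 0 means an unknown log item name was given */
    } else {
        cpu_set_log(mask);      /* opens the log file on first use */
    }
}
#endif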
bellardea041c02003-06-25 16:16:50 +00001878
bellard75012672003-06-21 13:11:07 +00001879void cpu_abort(CPUState *env, const char *fmt, ...)
1880{
1881 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001882 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001883
1884 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001885 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001886 fprintf(stderr, "qemu: fatal: ");
1887 vfprintf(stderr, fmt, ap);
1888 fprintf(stderr, "\n");
1889#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001890 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1891#else
1892 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001893#endif
aliguori93fcfe32009-01-15 22:34:14 +00001894 if (qemu_log_enabled()) {
1895 qemu_log("qemu: fatal: ");
1896 qemu_log_vprintf(fmt, ap2);
1897 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001898#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001899 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001900#else
aliguori93fcfe32009-01-15 22:34:14 +00001901 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001902#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001903 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001904 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001905 }
pbrook493ae1f2007-11-23 16:53:59 +00001906 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001907 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001908#if defined(CONFIG_USER_ONLY)
1909 {
1910 struct sigaction act;
1911 sigfillset(&act.sa_mask);
1912 act.sa_handler = SIG_DFL;
1913 sigaction(SIGABRT, &act, NULL);
1914 }
1915#endif
bellard75012672003-06-21 13:11:07 +00001916 abort();
1917}
1918
thsc5be9f02007-02-28 20:20:53 +00001919CPUState *cpu_copy(CPUState *env)
1920{
ths01ba9812007-12-09 02:22:57 +00001921 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001922 CPUState *next_cpu = new_env->next_cpu;
1923 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001924#if defined(TARGET_HAS_ICE)
1925 CPUBreakpoint *bp;
1926 CPUWatchpoint *wp;
1927#endif
1928
thsc5be9f02007-02-28 20:20:53 +00001929 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001930
1931 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001932 new_env->next_cpu = next_cpu;
1933 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001934
1935 /* Clone all break/watchpoints.
1936 Note: Once we support ptrace with hw-debug register access, make sure
1937 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001938 QTAILQ_INIT(&env->breakpoints);
1939 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001940#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001941 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001942 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1943 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001944 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001945 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1946 wp->flags, NULL);
1947 }
1948#endif
1949
thsc5be9f02007-02-28 20:20:53 +00001950 return new_env;
1951}
1952
bellard01243112004-01-04 15:48:17 +00001953#if !defined(CONFIG_USER_ONLY)
1954
edgar_igl5c751e92008-05-06 08:44:21 +00001955static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1956{
1957 unsigned int i;
1958
1959 /* Discard jump cache entries for any tb which might potentially
1960 overlap the flushed page. */
1961 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1962 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001963 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001964
1965 i = tb_jmp_cache_hash_page(addr);
1966 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001967 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001968}
1969
Igor Kovalenko08738982009-07-12 02:15:40 +04001970static CPUTLBEntry s_cputlb_empty_entry = {
1971 .addr_read = -1,
1972 .addr_write = -1,
1973 .addr_code = -1,
1974 .addend = -1,
1975};
1976
bellardee8b7022004-02-03 23:35:10 +00001977/* NOTE: if flush_global is true, also flush global entries (not
1978 implemented yet) */
1979void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001980{
bellard33417e72003-08-10 21:47:01 +00001981 int i;
bellard01243112004-01-04 15:48:17 +00001982
bellard9fa3e852004-01-04 18:06:42 +00001983#if defined(DEBUG_TLB)
1984 printf("tlb_flush:\n");
1985#endif
bellard01243112004-01-04 15:48:17 +00001986 /* must reset current TB so that interrupts cannot modify the
1987 links while we are modifying them */
1988 env->current_tb = NULL;
1989
bellard33417e72003-08-10 21:47:01 +00001990 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001991 int mmu_idx;
1992 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001993 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09001994 }
bellard33417e72003-08-10 21:47:01 +00001995 }
bellard9fa3e852004-01-04 18:06:42 +00001996
bellard8a40a182005-11-20 10:35:40 +00001997 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00001998
Paul Brookd4c430a2010-03-17 02:14:28 +00001999 env->tlb_flush_addr = -1;
2000 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00002001 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00002002}
2003
bellard274da6b2004-05-20 21:56:27 +00002004static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00002005{
ths5fafdf22007-09-16 21:08:06 +00002006 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00002007 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00002008 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00002009 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00002010 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00002011 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04002012 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00002013 }
bellard61382a52003-10-27 21:22:23 +00002014}
2015
bellard2e126692004-04-25 21:28:44 +00002016void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00002017{
bellard8a40a182005-11-20 10:35:40 +00002018 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002019 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00002020
bellard9fa3e852004-01-04 18:06:42 +00002021#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00002022 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00002023#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00002024 /* Check if we need to flush due to large pages. */
2025 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
2026#if defined(DEBUG_TLB)
2027 printf("tlb_flush_page: forced full flush ("
2028 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2029 env->tlb_flush_addr, env->tlb_flush_mask);
2030#endif
2031 tlb_flush(env, 1);
2032 return;
2033 }
bellard01243112004-01-04 15:48:17 +00002034 /* must reset current TB so that interrupts cannot modify the
2035 links while we are modifying them */
2036 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00002037
bellard61382a52003-10-27 21:22:23 +00002038 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00002039 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002040 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2041 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00002042
edgar_igl5c751e92008-05-06 08:44:21 +00002043 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00002044}
2045
bellard9fa3e852004-01-04 18:06:42 +00002046/* update the TLBs so that writes to code in the virtual page 'addr'
2047 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05002048static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00002049{
ths5fafdf22007-09-16 21:08:06 +00002050 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00002051 ram_addr + TARGET_PAGE_SIZE,
2052 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00002053}
2054
bellard9fa3e852004-01-04 18:06:42 +00002055/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00002056 tested for self-modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05002057static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00002058 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00002059{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002060 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00002061}
2062
ths5fafdf22007-09-16 21:08:06 +00002063static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00002064 unsigned long start, unsigned long length)
2065{
2066 unsigned long addr;
bellard84b7b8e2005-11-28 21:19:04 +00002067 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2068 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00002069 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00002070 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00002071 }
2072 }
2073}
2074
pbrook5579c7f2009-04-11 14:47:08 +00002075/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002076void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00002077 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00002078{
2079 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00002080 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002081 int i;
bellard1ccde1c2004-02-06 19:46:14 +00002082
2083 start &= TARGET_PAGE_MASK;
2084 end = TARGET_PAGE_ALIGN(end);
2085
2086 length = end - start;
2087 if (length == 0)
2088 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002089 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002090
bellard1ccde1c2004-02-06 19:46:14 +00002091 /* we modify the TLB cache so that the dirty bit will be set again
2092 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002093 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02002094 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00002095 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002096 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00002097 != (end - 1) - start) {
2098 abort();
2099 }
2100
bellard6a00d602005-11-21 23:25:50 +00002101 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002102 int mmu_idx;
2103 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2104 for(i = 0; i < CPU_TLB_SIZE; i++)
2105 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2106 start1, length);
2107 }
bellard6a00d602005-11-21 23:25:50 +00002108 }
bellard1ccde1c2004-02-06 19:46:14 +00002109}
2110
aliguori74576192008-10-06 14:02:03 +00002111int cpu_physical_memory_set_dirty_tracking(int enable)
2112{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002113 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002114 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002115 ret = cpu_notify_migration_log(!!enable);
2116 return ret;
aliguori74576192008-10-06 14:02:03 +00002117}
2118
2119int cpu_physical_memory_get_dirty_tracking(void)
2120{
2121 return in_migration;
2122}
2123
Anthony Liguoric227f092009-10-01 16:12:16 -05002124int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2125 target_phys_addr_t end_addr)
aliguori2bec46d2008-11-24 20:21:41 +00002126{
Michael S. Tsirkin7b8f3b72010-01-27 22:07:21 +02002127 int ret;
Jan Kiszka151f7742009-05-01 20:52:47 +02002128
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002129 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
Jan Kiszka151f7742009-05-01 20:52:47 +02002130 return ret;
aliguori2bec46d2008-11-24 20:21:41 +00002131}
2132
Anthony PERARDe5896b12011-02-07 12:19:23 +01002133int cpu_physical_log_start(target_phys_addr_t start_addr,
2134 ram_addr_t size)
2135{
2136 CPUPhysMemoryClient *client;
2137 QLIST_FOREACH(client, &memory_client_list, list) {
2138 if (client->log_start) {
2139 int r = client->log_start(client, start_addr, size);
2140 if (r < 0) {
2141 return r;
2142 }
2143 }
2144 }
2145 return 0;
2146}
2147
2148int cpu_physical_log_stop(target_phys_addr_t start_addr,
2149 ram_addr_t size)
2150{
2151 CPUPhysMemoryClient *client;
2152 QLIST_FOREACH(client, &memory_client_list, list) {
2153 if (client->log_stop) {
2154 int r = client->log_stop(client, start_addr, size);
2155 if (r < 0) {
2156 return r;
2157 }
2158 }
2159 }
2160 return 0;
2161}
2162
bellard3a7d9292005-08-21 09:26:42 +00002163static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2164{
Anthony Liguoric227f092009-10-01 16:12:16 -05002165 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002166 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002167
bellard84b7b8e2005-11-28 21:19:04 +00002168 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
pbrook5579c7f2009-04-11 14:47:08 +00002169 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2170 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002171 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002172 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002173 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002174 }
2175 }
2176}
2177
2178/* update the TLB according to the current state of the dirty bits */
2179void cpu_tlb_update_dirty(CPUState *env)
2180{
2181 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002182 int mmu_idx;
2183 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2184 for(i = 0; i < CPU_TLB_SIZE; i++)
2185 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2186 }
bellard3a7d9292005-08-21 09:26:42 +00002187}
2188
pbrook0f459d12008-06-09 00:20:13 +00002189static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002190{
pbrook0f459d12008-06-09 00:20:13 +00002191 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2192 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002193}
2194
pbrook0f459d12008-06-09 00:20:13 +00002195/* update the TLB corresponding to virtual page vaddr
2196 so that it is no longer dirty */
2197static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002198{
bellard1ccde1c2004-02-06 19:46:14 +00002199 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002200 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002201
pbrook0f459d12008-06-09 00:20:13 +00002202 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002203 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002204 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2205 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002206}
2207
Paul Brookd4c430a2010-03-17 02:14:28 +00002208/* Our TLB does not support large pages, so remember the area covered by
2209 large pages and trigger a full TLB flush if these are invalidated. */
2210static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2211 target_ulong size)
2212{
2213 target_ulong mask = ~(size - 1);
2214
2215 if (env->tlb_flush_addr == (target_ulong)-1) {
2216 env->tlb_flush_addr = vaddr & mask;
2217 env->tlb_flush_mask = mask;
2218 return;
2219 }
2220 /* Extend the existing region to include the new page.
2221 This is a compromise between unnecessary flushes and the cost
2222 of maintaining a full variable size TLB. */
2223 mask &= env->tlb_flush_mask;
2224 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2225 mask <<= 1;
2226 }
2227 env->tlb_flush_addr &= mask;
2228 env->tlb_flush_mask = mask;
2229}
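/* Worked example (editorial): with 4K target pages, suppose a 2MB page at
   vaddr 0x00200000 is recorded first: tlb_flush_addr = 0x00200000 and
   tlb_flush_mask = 0xffe00000.  A second 2MB page at 0x00600000 differs in
   bit 22, so the loop above shifts the mask left twice until
   ((0x00200000 ^ 0x00600000) & mask) == 0, leaving mask = 0xff800000 and
   tlb_flush_addr = 0x00000000: a single 8MB region covers both pages, at
   the cost of coarser full flushes. */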
2230
2231/* Add a new TLB entry. At most one entry for a given virtual address
2232 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2233 supplied size is used only by tlb_flush_page. */
2234void tlb_set_page(CPUState *env, target_ulong vaddr,
2235 target_phys_addr_t paddr, int prot,
2236 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002237{
bellard92e873b2004-05-21 14:52:29 +00002238 PhysPageDesc *p;
bellard4f2ac232004-04-26 19:44:02 +00002239 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00002240 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002241 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002242 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002243 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002244 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002245 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002246 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002247
Paul Brookd4c430a2010-03-17 02:14:28 +00002248 assert(size >= TARGET_PAGE_SIZE);
2249 if (size != TARGET_PAGE_SIZE) {
2250 tlb_add_large_page(env, vaddr, size);
2251 }
bellard92e873b2004-05-21 14:52:29 +00002252 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
bellard9fa3e852004-01-04 18:06:42 +00002253 if (!p) {
2254 pd = IO_MEM_UNASSIGNED;
bellard9fa3e852004-01-04 18:06:42 +00002255 } else {
2256 pd = p->phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00002257 }
2258#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002259 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2260 " prot=%x idx=%d pd=0x%08lx\n",
2261 vaddr, paddr, prot, mmu_idx, pd);
bellard9fa3e852004-01-04 18:06:42 +00002262#endif
2263
pbrook0f459d12008-06-09 00:20:13 +00002264 address = vaddr;
2265 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2266 /* IO memory case (romd handled later) */
2267 address |= TLB_MMIO;
2268 }
pbrook5579c7f2009-04-11 14:47:08 +00002269 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
pbrook0f459d12008-06-09 00:20:13 +00002270 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2271 /* Normal RAM. */
2272 iotlb = pd & TARGET_PAGE_MASK;
2273 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2274 iotlb |= IO_MEM_NOTDIRTY;
2275 else
2276 iotlb |= IO_MEM_ROM;
2277 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002278 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002279 It would be nice to pass an offset from the base address
2280 of that region. This would avoid having to special case RAM,
2281 and avoid full address decoding in every device.
2282 We can't use the high bits of pd for this because
2283 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002284 iotlb = (pd & ~TARGET_PAGE_MASK);
2285 if (p) {
pbrook8da3ff12008-12-01 18:59:50 +00002286 iotlb += p->region_offset;
2287 } else {
2288 iotlb += paddr;
2289 }
pbrook0f459d12008-06-09 00:20:13 +00002290 }
pbrook6658ffb2007-03-16 23:58:11 +00002291
pbrook0f459d12008-06-09 00:20:13 +00002292 code_address = address;
2293 /* Make accesses to pages with watchpoints go via the
2294 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002295 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002296 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002297 /* Avoid trapping reads of pages with a write breakpoint. */
2298 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2299 iotlb = io_mem_watch + paddr;
2300 address |= TLB_MMIO;
2301 break;
2302 }
pbrook6658ffb2007-03-16 23:58:11 +00002303 }
pbrook0f459d12008-06-09 00:20:13 +00002304 }
balrogd79acba2007-06-26 20:01:13 +00002305
pbrook0f459d12008-06-09 00:20:13 +00002306 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2307 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2308 te = &env->tlb_table[mmu_idx][index];
2309 te->addend = addend - vaddr;
2310 if (prot & PAGE_READ) {
2311 te->addr_read = address;
2312 } else {
2313 te->addr_read = -1;
2314 }
edgar_igl5c751e92008-05-06 08:44:21 +00002315
pbrook0f459d12008-06-09 00:20:13 +00002316 if (prot & PAGE_EXEC) {
2317 te->addr_code = code_address;
2318 } else {
2319 te->addr_code = -1;
2320 }
2321 if (prot & PAGE_WRITE) {
2322 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2323 (pd & IO_MEM_ROMD)) {
2324 /* Write access calls the I/O callback. */
2325 te->addr_write = address | TLB_MMIO;
2326 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2327 !cpu_physical_memory_is_dirty(pd)) {
2328 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002329 } else {
pbrook0f459d12008-06-09 00:20:13 +00002330 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002331 }
pbrook0f459d12008-06-09 00:20:13 +00002332 } else {
2333 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002334 }
bellard9fa3e852004-01-04 18:06:42 +00002335}
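/* Illustrative sketch (editorial): how the softmmu fast path consumes the
   entry installed above (names reused from tlb_set_page).  For a plain RAM
   page the tag comparison succeeds and the host address is vaddr plus the
   stored addend; TLB_MMIO/TLB_NOTDIRTY bits in the tag force a slow-path
   call instead. */
#if 0
{
    int idx = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *e = &env->tlb_table[mmu_idx][idx];

    if ((vaddr & TARGET_PAGE_MASK) ==
        (e->addr_read & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        uint8_t *host = (uint8_t *)(unsigned long)(vaddr + e->addend);
        /* hit: load directly from 'host' */
    } else {
        /* miss, or an IO/not-dirty page: take the slow path */
    }
}
#endif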
2336
bellard01243112004-01-04 15:48:17 +00002337#else
2338
bellardee8b7022004-02-03 23:35:10 +00002339void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002340{
2341}
2342
bellard2e126692004-04-25 21:28:44 +00002343void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002344{
2345}
2346
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002347/*
2348 * Walks guest process memory "regions" one by one
2349 * and calls callback function 'fn' for each region.
2350 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002351
2352struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002353{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002354 walk_memory_regions_fn fn;
2355 void *priv;
2356 unsigned long start;
2357 int prot;
2358};
bellard9fa3e852004-01-04 18:06:42 +00002359
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002360static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002361 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002362{
2363 if (data->start != -1ul) {
2364 int rc = data->fn(data->priv, data->start, end, data->prot);
2365 if (rc != 0) {
2366 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002367 }
bellard33417e72003-08-10 21:47:01 +00002368 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002369
2370 data->start = (new_prot ? end : -1ul);
2371 data->prot = new_prot;
2372
2373 return 0;
2374}
2375
2376static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002377 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002378{
Paul Brookb480d9b2010-03-12 23:23:29 +00002379 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002380 int i, rc;
2381
2382 if (*lp == NULL) {
2383 return walk_memory_regions_end(data, base, 0);
2384 }
2385
2386 if (level == 0) {
2387 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002388 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002389 int prot = pd[i].flags;
2390
2391 pa = base | (i << TARGET_PAGE_BITS);
2392 if (prot != data->prot) {
2393 rc = walk_memory_regions_end(data, pa, prot);
2394 if (rc != 0) {
2395 return rc;
2396 }
2397 }
2398 }
2399 } else {
2400 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002401 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002402 pa = base | ((abi_ulong)i <<
2403 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002404 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2405 if (rc != 0) {
2406 return rc;
2407 }
2408 }
2409 }
2410
2411 return 0;
2412}
2413
2414int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2415{
2416 struct walk_memory_regions_data data;
2417 unsigned long i;
2418
2419 data.fn = fn;
2420 data.priv = priv;
2421 data.start = -1ul;
2422 data.prot = 0;
2423
2424 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002425 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002426 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2427 if (rc != 0) {
2428 return rc;
2429 }
2430 }
2431
2432 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002433}
2434
Paul Brookb480d9b2010-03-12 23:23:29 +00002435static int dump_region(void *priv, abi_ulong start,
2436 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002437{
2438 FILE *f = (FILE *)priv;
2439
Paul Brookb480d9b2010-03-12 23:23:29 +00002440 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2441 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002442 start, end, end - start,
2443 ((prot & PAGE_READ) ? 'r' : '-'),
2444 ((prot & PAGE_WRITE) ? 'w' : '-'),
2445 ((prot & PAGE_EXEC) ? 'x' : '-'));
2446
2447 return (0);
2448}
2449
2450/* dump memory mappings */
2451void page_dump(FILE *f)
2452{
2453 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2454 "start", "end", "size", "prot");
2455 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002456}
2457
pbrook53a59602006-03-25 19:31:22 +00002458int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002459{
bellard9fa3e852004-01-04 18:06:42 +00002460 PageDesc *p;
2461
2462 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002463 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002464 return 0;
2465 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002466}
2467
Richard Henderson376a7902010-03-10 15:57:04 -08002468/* Modify the flags of a page and invalidate the code if necessary.
2469 The flag PAGE_WRITE_ORG is set automatically depending
2470 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002471void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002472{
Richard Henderson376a7902010-03-10 15:57:04 -08002473 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002474
Richard Henderson376a7902010-03-10 15:57:04 -08002475 /* This function should never be called with addresses outside the
2476 guest address space. If this assert fires, it probably indicates
2477 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002478#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2479 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002480#endif
2481 assert(start < end);
2482
bellard9fa3e852004-01-04 18:06:42 +00002483 start = start & TARGET_PAGE_MASK;
2484 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002485
2486 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002487 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002488 }
2489
2490 for (addr = start, len = end - start;
2491 len != 0;
2492 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2493 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2494
2495 /* If the write protection bit is set, then we invalidate
2496 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002497 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002498 (flags & PAGE_WRITE) &&
2499 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002500 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002501 }
2502 p->flags = flags;
2503 }
bellard9fa3e852004-01-04 18:06:42 +00002504}
2505
ths3d97b402007-11-02 19:02:07 +00002506int page_check_range(target_ulong start, target_ulong len, int flags)
2507{
2508 PageDesc *p;
2509 target_ulong end;
2510 target_ulong addr;
2511
Richard Henderson376a7902010-03-10 15:57:04 -08002512 /* This function should never be called with addresses outside the
2513 guest address space. If this assert fires, it probably indicates
2514 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002515#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2516 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002517#endif
2518
Richard Henderson3e0650a2010-03-29 10:54:42 -07002519 if (len == 0) {
2520 return 0;
2521 }
Richard Henderson376a7902010-03-10 15:57:04 -08002522 if (start + len - 1 < start) {
2523 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002524 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002525 }
balrog55f280c2008-10-28 10:24:11 +00002526
ths3d97b402007-11-02 19:02:07 +00002527 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2528 start = start & TARGET_PAGE_MASK;
2529
Richard Henderson376a7902010-03-10 15:57:04 -08002530 for (addr = start, len = end - start;
2531 len != 0;
2532 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002533 p = page_find(addr >> TARGET_PAGE_BITS);
2534 if( !p )
2535 return -1;
2536 if( !(p->flags & PAGE_VALID) )
2537 return -1;
2538
bellarddae32702007-11-14 10:51:00 +00002539 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002540 return -1;
bellarddae32702007-11-14 10:51:00 +00002541 if (flags & PAGE_WRITE) {
2542 if (!(p->flags & PAGE_WRITE_ORG))
2543 return -1;
2544 /* unprotect the page if it was made read-only because it
2545 contains translated code */
2546 if (!(p->flags & PAGE_WRITE)) {
2547 if (!page_unprotect(addr, 0, NULL))
2548 return -1;
2549 }
2550 return 0;
2551 }
ths3d97b402007-11-02 19:02:07 +00002552 }
2553 return 0;
2554}
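/* Illustrative sketch (editorial): user-mode syscall emulation validating a
   guest buffer before touching it, e.g. ahead of a write(2).  'guest_buf',
   'count' and TARGET_EFAULT are assumed from the linux-user code. */
#if 0
{
    if (page_check_range(guest_buf, count, PAGE_READ) < 0) {
        return -TARGET_EFAULT;  /* guest passed a bad pointer */
    }
    /* safe to read from g2h(guest_buf) now */
}
#endif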
2555
bellard9fa3e852004-01-04 18:06:42 +00002556/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002557 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002558int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002559{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002560 unsigned int prot;
2561 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002562 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002563
pbrookc8a706f2008-06-02 16:16:42 +00002564 /* Technically this isn't safe inside a signal handler. However we
2565 know this only ever happens in a synchronous SEGV handler, so in
2566 practice it seems to be ok. */
2567 mmap_lock();
2568
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002569 p = page_find(address >> TARGET_PAGE_BITS);
2570 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002571 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002572 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002573 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002574
bellard9fa3e852004-01-04 18:06:42 +00002575 /* if the page was really writable, then we change its
2576 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002577 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2578 host_start = address & qemu_host_page_mask;
2579 host_end = host_start + qemu_host_page_size;
2580
2581 prot = 0;
2582 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2583 p = page_find(addr >> TARGET_PAGE_BITS);
2584 p->flags |= PAGE_WRITE;
2585 prot |= p->flags;
2586
bellard9fa3e852004-01-04 18:06:42 +00002587 /* and since the content will be modified, we must invalidate
2588 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002589 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002590#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002591 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002592#endif
bellard9fa3e852004-01-04 18:06:42 +00002593 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002594 mprotect((void *)g2h(host_start), qemu_host_page_size,
2595 prot & PAGE_BITS);
2596
2597 mmap_unlock();
2598 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002599 }
pbrookc8a706f2008-06-02 16:16:42 +00002600 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002601 return 0;
2602}
2603
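/* Illustrative sketch (the handler name and arguments are assumptions;
   the real caller is QEMU's usermode SEGV path): on a write fault to a
   page that was write-protected because it holds translated code, the
   signal handler does something like

       int handle_write_fault(unsigned long host_addr, unsigned long pc,
                              void *puc)
       {
           return page_unprotect(h2g(host_addr), pc, puc);
       }

   A non-zero return means the overlapping TBs were invalidated and the
   page was made writable again, so the faulting guest instruction can
   simply be restarted. */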
bellard6a00d602005-11-21 23:25:50 +00002604static inline void tlb_set_dirty(CPUState *env,
2605 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002606{
2607}
bellard9fa3e852004-01-04 18:06:42 +00002608#endif /* defined(CONFIG_USER_ONLY) */
2609
pbrooke2eef172008-06-08 01:09:01 +00002610#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002611
Paul Brookc04b2b72010-03-01 03:31:14 +00002612#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2613typedef struct subpage_t {
2614 target_phys_addr_t base;
Richard Hendersonf6405242010-04-22 16:47:31 -07002615 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2616 ram_addr_t region_offset[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002617} subpage_t;
2618
Anthony Liguoric227f092009-10-01 16:12:16 -05002619static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2620 ram_addr_t memory, ram_addr_t region_offset);
Richard Hendersonf6405242010-04-22 16:47:31 -07002621static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2622 ram_addr_t orig_memory,
2623 ram_addr_t region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002624#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2625 need_subpage) \
2626 do { \
2627 if (addr > start_addr) \
2628 start_addr2 = 0; \
2629 else { \
2630 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2631 if (start_addr2 > 0) \
2632 need_subpage = 1; \
2633 } \
2634 \
blueswir149e9fba2007-05-30 17:25:06 +00002635 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002636 end_addr2 = TARGET_PAGE_SIZE - 1; \
2637 else { \
2638 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2639 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2640 need_subpage = 1; \
2641 } \
2642 } while (0)
2643
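/* Worked example for the macro above (illustrative, assuming 4 KiB
   target pages): with addr = 0x1000, start_addr = 0x1080 and
   orig_size = 0x100, it computes start_addr2 = 0x080 and
   end_addr2 = 0x17f, setting need_subpage = 1: the registration only
   covers bytes 0x080..0x17f of that page, so it must be routed through
   a subpage. */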
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002644/* register physical memory.
2645 For RAM, 'size' must be a multiple of the target page size.
2646 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002647 io memory page. The address used when calling the IO function is
2648 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002649 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002650 before calculating this offset. This should not be a problem unless
2651 the low bits of start_addr and region_offset differ. */
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03002652void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
Anthony Liguoric227f092009-10-01 16:12:16 -05002653 ram_addr_t size,
2654 ram_addr_t phys_offset,
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03002655 ram_addr_t region_offset,
2656 bool log_dirty)
bellard33417e72003-08-10 21:47:01 +00002657{
Anthony Liguoric227f092009-10-01 16:12:16 -05002658 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002659 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002660 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002661 ram_addr_t orig_size = size;
Richard Hendersonf6405242010-04-22 16:47:31 -07002662 subpage_t *subpage;
bellard33417e72003-08-10 21:47:01 +00002663
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002664 assert(size);
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03002665 cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002666
pbrook67c4d232009-02-23 13:16:07 +00002667 if (phys_offset == IO_MEM_UNASSIGNED) {
2668 region_offset = start_addr;
2669 }
pbrook8da3ff12008-12-01 18:59:50 +00002670 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002671 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002672 end_addr = start_addr + (target_phys_addr_t)size;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002673
2674 addr = start_addr;
2675 do {
blueswir1db7b5422007-05-26 17:36:03 +00002676 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2677 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002678 ram_addr_t orig_memory = p->phys_offset;
2679 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002680 int need_subpage = 0;
2681
2682 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2683 need_subpage);
Richard Hendersonf6405242010-04-22 16:47:31 -07002684 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002685 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2686 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002687 &p->phys_offset, orig_memory,
2688 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002689 } else {
2690 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2691 >> IO_MEM_SHIFT];
2692 }
pbrook8da3ff12008-12-01 18:59:50 +00002693 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2694 region_offset);
2695 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002696 } else {
2697 p->phys_offset = phys_offset;
2698 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2699 (phys_offset & IO_MEM_ROMD))
2700 phys_offset += TARGET_PAGE_SIZE;
2701 }
2702 } else {
2703 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2704 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002705 p->region_offset = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00002706 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
pbrook8da3ff12008-12-01 18:59:50 +00002707 (phys_offset & IO_MEM_ROMD)) {
blueswir1db7b5422007-05-26 17:36:03 +00002708 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002709 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002710 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002711 int need_subpage = 0;
2712
2713 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2714 end_addr2, need_subpage);
2715
Richard Hendersonf6405242010-04-22 16:47:31 -07002716 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002717 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002718 &p->phys_offset, IO_MEM_UNASSIGNED,
pbrook67c4d232009-02-23 13:16:07 +00002719 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002720 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002721 phys_offset, region_offset);
2722 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002723 }
2724 }
2725 }
pbrook8da3ff12008-12-01 18:59:50 +00002726 region_offset += TARGET_PAGE_SIZE;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002727 addr += TARGET_PAGE_SIZE;
2728 } while (addr != end_addr);
ths3b46e622007-09-17 08:09:54 +00002729
bellard9d420372006-06-25 22:25:22 +00002730 /* since each CPU stores ram addresses in its TLB cache, we must
2731 reset the modified entries */
2732 /* XXX: slow ! */
2733 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2734 tlb_flush(env, 1);
2735 }
bellard33417e72003-08-10 21:47:01 +00002736}
2737
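/* Illustrative sketch (board name and sizes are assumptions;
   qemu_ram_alloc() and IO_MEM_RAM are real): a board model typically
   pairs a RAM allocation with this function, e.g.

       ram_addr_t ram_off = qemu_ram_alloc(NULL, "board.ram", ram_size);
       cpu_register_physical_memory_log(0x00000000, ram_size,
                                        ram_off | IO_MEM_RAM, 0, false);

   Low bits of phys_offset equal to IO_MEM_RAM mark plain RAM; a
   non-zero I/O index there routes accesses to registered handlers. */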
bellardba863452006-09-24 18:41:10 +00002738/* XXX: temporary until new memory mapping API */
Anthony Liguoric227f092009-10-01 16:12:16 -05002739ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
bellardba863452006-09-24 18:41:10 +00002740{
2741 PhysPageDesc *p;
2742
2743 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2744 if (!p)
2745 return IO_MEM_UNASSIGNED;
2746 return p->phys_offset;
2747}
2748
Anthony Liguoric227f092009-10-01 16:12:16 -05002749void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002750{
2751 if (kvm_enabled())
2752 kvm_coalesce_mmio_region(addr, size);
2753}
2754
Anthony Liguoric227f092009-10-01 16:12:16 -05002755void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002756{
2757 if (kvm_enabled())
2758 kvm_uncoalesce_mmio_region(addr, size);
2759}
2760
Sheng Yang62a27442010-01-26 19:21:16 +08002761void qemu_flush_coalesced_mmio_buffer(void)
2762{
2763 if (kvm_enabled())
2764 kvm_flush_coalesced_mmio_buffer();
2765}
2766
Marcelo Tosattic9027602010-03-01 20:25:08 -03002767#if defined(__linux__) && !defined(TARGET_S390X)
2768
2769#include <sys/vfs.h>
2770
2771#define HUGETLBFS_MAGIC 0x958458f6
2772
2773static long gethugepagesize(const char *path)
2774{
2775 struct statfs fs;
2776 int ret;
2777
2778 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002779 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002780 } while (ret != 0 && errno == EINTR);
2781
2782 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002783 perror(path);
2784 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002785 }
2786
2787 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002788 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002789
2790 return fs.f_bsize;
2791}
2792
Alex Williamson04b16652010-07-02 11:13:17 -06002793static void *file_ram_alloc(RAMBlock *block,
2794 ram_addr_t memory,
2795 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002796{
2797 char *filename;
2798 void *area;
2799 int fd;
2800#ifdef MAP_POPULATE
2801 int flags;
2802#endif
2803 unsigned long hpagesize;
2804
2805 hpagesize = gethugepagesize(path);
2806 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002807 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002808 }
2809
2810 if (memory < hpagesize) {
2811 return NULL;
2812 }
2813
2814 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2815 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2816 return NULL;
2817 }
2818
2819 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002820 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002821 }
2822
2823 fd = mkstemp(filename);
2824 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002825 perror("unable to create backing store for hugepages");
2826 free(filename);
2827 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002828 }
2829 unlink(filename);
2830 free(filename);
2831
2832 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2833
2834 /*
2835 * ftruncate is not supported by hugetlbfs in older
2836 * hosts, so don't bother bailing out on errors.
2837 * If anything goes wrong with it under other filesystems,
2838 * mmap will fail.
2839 */
2840 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002841 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002842
2843#ifdef MAP_POPULATE
 2844 /* NB: MAP_POPULATE won't exhaustively allocate all phys pages when
 2845 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2846 * to sidestep this quirk.
2847 */
2848 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2849 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2850#else
2851 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2852#endif
2853 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002854 perror("file_ram_alloc: can't mmap RAM pages");
2855 close(fd);
2856 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002857 }
Alex Williamson04b16652010-07-02 11:13:17 -06002858 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002859 return area;
2860}
2861#endif
2862
Alex Williamsond17b5282010-06-25 11:08:38 -06002863static ram_addr_t find_ram_offset(ram_addr_t size)
2864{
Alex Williamson04b16652010-07-02 11:13:17 -06002865 RAMBlock *block, *next_block;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002866 ram_addr_t offset = 0, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002867
2868 if (QLIST_EMPTY(&ram_list.blocks))
2869 return 0;
2870
2871 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002872 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002873
2874 end = block->offset + block->length;
2875
2876 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2877 if (next_block->offset >= end) {
2878 next = MIN(next, next_block->offset);
2879 }
2880 }
2881 if (next - end >= size && next - end < mingap) {
2882 offset = end;
2883 mingap = next - end;
2884 }
2885 }
2886 return offset;
2887}
2888
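/* Worked example (illustrative): with blocks [0x0, 0x2000) and
   [0x8000, 0xa000) on the list, the candidate gaps are
   [0x2000, 0x8000) and [0xa000, RAM_ADDR_MAX). For size = 0x1000 the
   first gap is the smallest one that fits, so find_ram_offset()
   returns 0x2000; best-fit keeps the large gaps free for later, larger
   allocations. */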
2889static ram_addr_t last_ram_offset(void)
2890{
Alex Williamsond17b5282010-06-25 11:08:38 -06002891 RAMBlock *block;
2892 ram_addr_t last = 0;
2893
2894 QLIST_FOREACH(block, &ram_list.blocks, next)
2895 last = MAX(last, block->offset + block->length);
2896
2897 return last;
2898}
2899
Cam Macdonell84b89d72010-07-26 18:10:57 -06002900ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002901 ram_addr_t size, void *host)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002902{
2903 RAMBlock *new_block, *block;
2904
2905 size = TARGET_PAGE_ALIGN(size);
2906 new_block = qemu_mallocz(sizeof(*new_block));
2907
2908 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2909 char *id = dev->parent_bus->info->get_dev_path(dev);
2910 if (id) {
2911 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2912 qemu_free(id);
2913 }
2914 }
2915 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2916
2917 QLIST_FOREACH(block, &ram_list.blocks, next) {
2918 if (!strcmp(block->idstr, new_block->idstr)) {
2919 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2920 new_block->idstr);
2921 abort();
2922 }
2923 }
2924
Jun Nakajima432d2682010-08-31 16:41:25 +01002925 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002926 if (host) {
2927 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002928 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002929 } else {
2930 if (mem_path) {
2931#if defined (__linux__) && !defined(TARGET_S390X)
2932 new_block->host = file_ram_alloc(new_block, size, mem_path);
2933 if (!new_block->host) {
2934 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002935 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002936 }
2937#else
2938 fprintf(stderr, "-mem-path option unsupported\n");
2939 exit(1);
2940#endif
2941 } else {
2942#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002943 /* S390 KVM requires the topmost vma of the RAM to be smaller than
 2944 a system-defined value, which is at least 256GB. Larger systems
2945 have larger values. We put the guest between the end of data
2946 segment (system break) and this value. We use 32GB as a base to
2947 have enough room for the system break to grow. */
2948 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002949 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002950 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002951 if (new_block->host == MAP_FAILED) {
2952 fprintf(stderr, "Allocating RAM failed\n");
2953 abort();
2954 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002955#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002956 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002957 xen_ram_alloc(new_block->offset, size);
2958 } else {
2959 new_block->host = qemu_vmalloc(size);
2960 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002961#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002962 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002963 }
2964 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002965 new_block->length = size;
2966
2967 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2968
2969 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
2970 last_ram_offset() >> TARGET_PAGE_BITS);
2971 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2972 0xff, size >> TARGET_PAGE_BITS);
2973
2974 if (kvm_enabled())
2975 kvm_setup_guest_memory(new_block->host, size);
2976
2977 return new_block->offset;
2978}
2979
Alex Williamson1724f042010-06-25 11:09:35 -06002980ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
pbrook94a6b542009-04-11 17:15:54 +00002981{
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002982 return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
pbrook94a6b542009-04-11 17:15:54 +00002983}
bellarde9a1ab12007-02-08 23:08:38 +00002984
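/* Illustrative sketch (the block name and size are assumptions; passing
   a NULL DeviceState is allowed by the code above): a device model
   typically allocates its backing RAM and then resolves a host pointer
   with qemu_get_ram_ptr(), defined later in this file.

       ram_addr_t vram_off = qemu_ram_alloc(NULL, "vga.vram", vram_size);
       uint8_t *vram = qemu_get_ram_ptr(vram_off);
*/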
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002985void qemu_ram_free_from_ptr(ram_addr_t addr)
2986{
2987 RAMBlock *block;
2988
2989 QLIST_FOREACH(block, &ram_list.blocks, next) {
2990 if (addr == block->offset) {
2991 QLIST_REMOVE(block, next);
2992 qemu_free(block);
2993 return;
2994 }
2995 }
2996}
2997
Anthony Liguoric227f092009-10-01 16:12:16 -05002998void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002999{
Alex Williamson04b16652010-07-02 11:13:17 -06003000 RAMBlock *block;
3001
3002 QLIST_FOREACH(block, &ram_list.blocks, next) {
3003 if (addr == block->offset) {
3004 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01003005 if (block->flags & RAM_PREALLOC_MASK) {
3006 ;
3007 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06003008#if defined (__linux__) && !defined(TARGET_S390X)
3009 if (block->fd) {
3010 munmap(block->host, block->length);
3011 close(block->fd);
3012 } else {
3013 qemu_vfree(block->host);
3014 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003015#else
3016 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06003017#endif
3018 } else {
3019#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3020 munmap(block->host, block->length);
3021#else
Jan Kiszka868bb332011-06-21 22:59:09 +02003022 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003023 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01003024 } else {
3025 qemu_vfree(block->host);
3026 }
Alex Williamson04b16652010-07-02 11:13:17 -06003027#endif
3028 }
3029 qemu_free(block);
3030 return;
3031 }
3032 }
3033
bellarde9a1ab12007-02-08 23:08:38 +00003034}
3035
Huang Yingcd19cfa2011-03-02 08:56:19 +01003036#ifndef _WIN32
3037void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3038{
3039 RAMBlock *block;
3040 ram_addr_t offset;
3041 int flags;
3042 void *area, *vaddr;
3043
3044 QLIST_FOREACH(block, &ram_list.blocks, next) {
3045 offset = addr - block->offset;
3046 if (offset < block->length) {
3047 vaddr = block->host + offset;
3048 if (block->flags & RAM_PREALLOC_MASK) {
3049 ;
3050 } else {
3051 flags = MAP_FIXED;
3052 munmap(vaddr, length);
3053 if (mem_path) {
3054#if defined(__linux__) && !defined(TARGET_S390X)
3055 if (block->fd) {
3056#ifdef MAP_POPULATE
3057 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3058 MAP_PRIVATE;
3059#else
3060 flags |= MAP_PRIVATE;
3061#endif
3062 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3063 flags, block->fd, offset);
3064 } else {
3065 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3066 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3067 flags, -1, 0);
3068 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003069#else
3070 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01003071#endif
3072 } else {
3073#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3074 flags |= MAP_SHARED | MAP_ANONYMOUS;
3075 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3076 flags, -1, 0);
3077#else
3078 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3079 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3080 flags, -1, 0);
3081#endif
3082 }
3083 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003084 fprintf(stderr, "Could not remap addr: "
3085 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01003086 length, addr);
3087 exit(1);
3088 }
3089 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3090 }
3091 return;
3092 }
3093 }
3094}
3095#endif /* !_WIN32 */
3096
pbrookdc828ca2009-04-09 22:21:07 +00003097/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00003098 With the exception of the softmmu code in this file, this should
3099 only be used for local memory (e.g. video ram) that the device owns,
3100 and knows it isn't going to access beyond the end of the block.
3101
3102 It should not be used for general purpose DMA.
3103 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3104 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003105void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00003106{
pbrook94a6b542009-04-11 17:15:54 +00003107 RAMBlock *block;
3108
Alex Williamsonf471a172010-06-11 11:11:42 -06003109 QLIST_FOREACH(block, &ram_list.blocks, next) {
3110 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05003111 /* Move this entry to the start of the list. */
3112 if (block != QLIST_FIRST(&ram_list.blocks)) {
3113 QLIST_REMOVE(block, next);
3114 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3115 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003116 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003117 /* We need to check if the requested address is in the RAM
3118 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003119 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003120 */
3121 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003122 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003123 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003124 block->host =
3125 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003126 }
3127 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003128 return block->host + (addr - block->offset);
3129 }
pbrook94a6b542009-04-11 17:15:54 +00003130 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003131
3132 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3133 abort();
3134
3135 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003136}
3137
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003138/* Return a host pointer to ram allocated with qemu_ram_alloc.
3139 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3140 */
3141void *qemu_safe_ram_ptr(ram_addr_t addr)
3142{
3143 RAMBlock *block;
3144
3145 QLIST_FOREACH(block, &ram_list.blocks, next) {
3146 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003147 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003148 /* We need to check if the requested address is in the RAM
3149 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003150 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003151 */
3152 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003153 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003154 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003155 block->host =
3156 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003157 }
3158 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003159 return block->host + (addr - block->offset);
3160 }
3161 }
3162
3163 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3164 abort();
3165
3166 return NULL;
3167}
3168
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003169/* Return a host pointer to the guest's RAM. Similar to qemu_get_ram_ptr
3170 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003171void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003172{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003173 if (*size == 0) {
3174 return NULL;
3175 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003176 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003177 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003178 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003179 RAMBlock *block;
3180
3181 QLIST_FOREACH(block, &ram_list.blocks, next) {
3182 if (addr - block->offset < block->length) {
3183 if (addr - block->offset + *size > block->length)
3184 *size = block->length - addr + block->offset;
3185 return block->host + (addr - block->offset);
3186 }
3187 }
3188
3189 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3190 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003191 }
3192}
3193
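/* Example (illustrative, non-Xen path): with a block of length 0x3000
   at offset 0, calling qemu_ram_ptr_length(0x2800, &sz) with sz set to
   0x1000 on entry clamps sz to 0x800, so the caller never touches
   bytes past the end of the block. */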
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003194void qemu_put_ram_ptr(void *addr)
3195{
3196 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003197}
3198
Marcelo Tosattie8902612010-10-11 15:31:19 -03003199int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003200{
pbrook94a6b542009-04-11 17:15:54 +00003201 RAMBlock *block;
3202 uint8_t *host = ptr;
3203
Jan Kiszka868bb332011-06-21 22:59:09 +02003204 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003205 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003206 return 0;
3207 }
3208
Alex Williamsonf471a172010-06-11 11:11:42 -06003209 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003210 /* This case happens when the block is not mapped. */
3211 if (block->host == NULL) {
3212 continue;
3213 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003214 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003215 *ram_addr = block->offset + (host - block->host);
3216 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003217 }
pbrook94a6b542009-04-11 17:15:54 +00003218 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003219
Marcelo Tosattie8902612010-10-11 15:31:19 -03003220 return -1;
3221}
Alex Williamsonf471a172010-06-11 11:11:42 -06003222
Marcelo Tosattie8902612010-10-11 15:31:19 -03003223/* Some of the softmmu routines need to translate from a host pointer
3224 (typically a TLB entry) back to a ram offset. */
3225ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3226{
3227 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003228
Marcelo Tosattie8902612010-10-11 15:31:19 -03003229 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3230 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3231 abort();
3232 }
3233 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003234}
3235
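/* Illustrative round trip (a sketch, non-Xen path; ram_addr is assumed
   to come from a prior qemu_ram_alloc() call):

       void *p = qemu_get_ram_ptr(ram_addr);
       assert(qemu_ram_addr_from_host_nofail(p) == ram_addr);
*/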
Anthony Liguoric227f092009-10-01 16:12:16 -05003236static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
bellard33417e72003-08-10 21:47:01 +00003237{
pbrook67d3b952006-12-18 05:03:52 +00003238#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003239 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003240#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003241#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003242 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
blueswir1e18231a2008-10-06 18:46:28 +00003243#endif
3244 return 0;
3245}
3246
Anthony Liguoric227f092009-10-01 16:12:16 -05003247static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00003248{
3249#ifdef DEBUG_UNASSIGNED
3250 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3251#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003252#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003253 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
blueswir1e18231a2008-10-06 18:46:28 +00003254#endif
3255 return 0;
3256}
3257
Anthony Liguoric227f092009-10-01 16:12:16 -05003258static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00003259{
3260#ifdef DEBUG_UNASSIGNED
3261 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3262#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003263#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003264 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00003265#endif
bellard33417e72003-08-10 21:47:01 +00003266 return 0;
3267}
3268
Anthony Liguoric227f092009-10-01 16:12:16 -05003269static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
bellard33417e72003-08-10 21:47:01 +00003270{
pbrook67d3b952006-12-18 05:03:52 +00003271#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003272 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
pbrook67d3b952006-12-18 05:03:52 +00003273#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003274#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003275 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
blueswir1e18231a2008-10-06 18:46:28 +00003276#endif
3277}
3278
Anthony Liguoric227f092009-10-01 16:12:16 -05003279static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00003280{
3281#ifdef DEBUG_UNASSIGNED
3282 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3283#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003284#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003285 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
blueswir1e18231a2008-10-06 18:46:28 +00003286#endif
3287}
3288
Anthony Liguoric227f092009-10-01 16:12:16 -05003289static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00003290{
3291#ifdef DEBUG_UNASSIGNED
3292 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3293#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003294#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003295 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00003296#endif
bellard33417e72003-08-10 21:47:01 +00003297}
3298
Blue Swirld60efc62009-08-25 18:29:31 +00003299static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
bellard33417e72003-08-10 21:47:01 +00003300 unassigned_mem_readb,
blueswir1e18231a2008-10-06 18:46:28 +00003301 unassigned_mem_readw,
3302 unassigned_mem_readl,
bellard33417e72003-08-10 21:47:01 +00003303};
3304
Blue Swirld60efc62009-08-25 18:29:31 +00003305static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
bellard33417e72003-08-10 21:47:01 +00003306 unassigned_mem_writeb,
blueswir1e18231a2008-10-06 18:46:28 +00003307 unassigned_mem_writew,
3308 unassigned_mem_writel,
bellard33417e72003-08-10 21:47:01 +00003309};
3310
Anthony Liguoric227f092009-10-01 16:12:16 -05003311static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00003312 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00003313{
bellard3a7d9292005-08-21 09:26:42 +00003314 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003315 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003316 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3317#if !defined(CONFIG_USER_ONLY)
3318 tb_invalidate_phys_page_fast(ram_addr, 1);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003319 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003320#endif
3321 }
pbrook5579c7f2009-04-11 14:47:08 +00003322 stb_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00003323 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003324 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003325 /* we remove the notdirty callback only if the code has been
3326 flushed */
3327 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003328 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003329}
3330
Anthony Liguoric227f092009-10-01 16:12:16 -05003331static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00003332 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00003333{
bellard3a7d9292005-08-21 09:26:42 +00003334 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003335 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003336 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3337#if !defined(CONFIG_USER_ONLY)
3338 tb_invalidate_phys_page_fast(ram_addr, 2);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003339 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003340#endif
3341 }
pbrook5579c7f2009-04-11 14:47:08 +00003342 stw_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00003343 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003344 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003345 /* we remove the notdirty callback only if the code has been
3346 flushed */
3347 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003348 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003349}
3350
Anthony Liguoric227f092009-10-01 16:12:16 -05003351static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00003352 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00003353{
bellard3a7d9292005-08-21 09:26:42 +00003354 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003355 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003356 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3357#if !defined(CONFIG_USER_ONLY)
3358 tb_invalidate_phys_page_fast(ram_addr, 4);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003359 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003360#endif
3361 }
pbrook5579c7f2009-04-11 14:47:08 +00003362 stl_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00003363 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003364 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003365 /* we remove the notdirty callback only if the code has been
3366 flushed */
3367 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003368 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003369}
3370
Blue Swirld60efc62009-08-25 18:29:31 +00003371static CPUReadMemoryFunc * const error_mem_read[3] = {
bellard3a7d9292005-08-21 09:26:42 +00003372 NULL, /* never used */
3373 NULL, /* never used */
3374 NULL, /* never used */
3375};
3376
Blue Swirld60efc62009-08-25 18:29:31 +00003377static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
bellard1ccde1c2004-02-06 19:46:14 +00003378 notdirty_mem_writeb,
3379 notdirty_mem_writew,
3380 notdirty_mem_writel,
3381};
3382
pbrook0f459d12008-06-09 00:20:13 +00003383/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003384static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003385{
3386 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003387 target_ulong pc, cs_base;
3388 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003389 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003390 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003391 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003392
aliguori06d55cc2008-11-18 20:24:06 +00003393 if (env->watchpoint_hit) {
3394 /* We re-entered the check after replacing the TB. Now raise
3395 * the debug interrupt so that is will trigger after the
3396 * current instruction. */
3397 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3398 return;
3399 }
pbrook2e70f6e2008-06-29 01:03:05 +00003400 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003401 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003402 if ((vaddr == (wp->vaddr & len_mask) ||
3403 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003404 wp->flags |= BP_WATCHPOINT_HIT;
3405 if (!env->watchpoint_hit) {
3406 env->watchpoint_hit = wp;
3407 tb = tb_find_pc(env->mem_io_pc);
3408 if (!tb) {
3409 cpu_abort(env, "check_watchpoint: could not find TB for "
3410 "pc=%p", (void *)env->mem_io_pc);
3411 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003412 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003413 tb_phys_invalidate(tb, -1);
3414 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3415 env->exception_index = EXCP_DEBUG;
3416 } else {
3417 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3418 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3419 }
3420 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003421 }
aliguori6e140f22008-11-18 20:37:55 +00003422 } else {
3423 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003424 }
3425 }
3426}
3427
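/* Illustrative sketch (vaddr is an assumption; cpu_watchpoint_insert()
   and the BP_* flags are existing QEMU APIs): a gdbstub arms a 4-byte
   write watchpoint with

       cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, NULL);

   after which guest writes to that address are routed through the
   watch_mem_write* handlers below and into check_watchpoint(). */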
pbrook6658ffb2007-03-16 23:58:11 +00003428/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3429 so these check for a hit then pass through to the normal out-of-line
3430 phys routines. */
Anthony Liguoric227f092009-10-01 16:12:16 -05003431static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003432{
aliguorib4051332008-11-18 20:14:20 +00003433 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003434 return ldub_phys(addr);
3435}
3436
Anthony Liguoric227f092009-10-01 16:12:16 -05003437static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003438{
aliguorib4051332008-11-18 20:14:20 +00003439 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003440 return lduw_phys(addr);
3441}
3442
Anthony Liguoric227f092009-10-01 16:12:16 -05003443static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003444{
aliguorib4051332008-11-18 20:14:20 +00003445 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003446 return ldl_phys(addr);
3447}
3448
Anthony Liguoric227f092009-10-01 16:12:16 -05003449static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003450 uint32_t val)
3451{
aliguorib4051332008-11-18 20:14:20 +00003452 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003453 stb_phys(addr, val);
3454}
3455
Anthony Liguoric227f092009-10-01 16:12:16 -05003456static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003457 uint32_t val)
3458{
aliguorib4051332008-11-18 20:14:20 +00003459 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003460 stw_phys(addr, val);
3461}
3462
Anthony Liguoric227f092009-10-01 16:12:16 -05003463static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003464 uint32_t val)
3465{
aliguorib4051332008-11-18 20:14:20 +00003466 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003467 stl_phys(addr, val);
3468}
3469
Blue Swirld60efc62009-08-25 18:29:31 +00003470static CPUReadMemoryFunc * const watch_mem_read[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00003471 watch_mem_readb,
3472 watch_mem_readw,
3473 watch_mem_readl,
3474};
3475
Blue Swirld60efc62009-08-25 18:29:31 +00003476static CPUWriteMemoryFunc * const watch_mem_write[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00003477 watch_mem_writeb,
3478 watch_mem_writew,
3479 watch_mem_writel,
3480};
pbrook6658ffb2007-03-16 23:58:11 +00003481
Richard Hendersonf6405242010-04-22 16:47:31 -07003482static inline uint32_t subpage_readlen (subpage_t *mmio,
3483 target_phys_addr_t addr,
3484 unsigned int len)
blueswir1db7b5422007-05-26 17:36:03 +00003485{
Richard Hendersonf6405242010-04-22 16:47:31 -07003486 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003487#if defined(DEBUG_SUBPAGE)
3488 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3489 mmio, len, addr, idx);
3490#endif
blueswir1db7b5422007-05-26 17:36:03 +00003491
Richard Hendersonf6405242010-04-22 16:47:31 -07003492 addr += mmio->region_offset[idx];
3493 idx = mmio->sub_io_index[idx];
3494 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
blueswir1db7b5422007-05-26 17:36:03 +00003495}
3496
Anthony Liguoric227f092009-10-01 16:12:16 -05003497static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
Richard Hendersonf6405242010-04-22 16:47:31 -07003498 uint32_t value, unsigned int len)
blueswir1db7b5422007-05-26 17:36:03 +00003499{
Richard Hendersonf6405242010-04-22 16:47:31 -07003500 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003501#if defined(DEBUG_SUBPAGE)
Richard Hendersonf6405242010-04-22 16:47:31 -07003502 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3503 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003504#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003505
3506 addr += mmio->region_offset[idx];
3507 idx = mmio->sub_io_index[idx];
3508 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00003509}
3510
Anthony Liguoric227f092009-10-01 16:12:16 -05003511static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003512{
blueswir1db7b5422007-05-26 17:36:03 +00003513 return subpage_readlen(opaque, addr, 0);
3514}
3515
Anthony Liguoric227f092009-10-01 16:12:16 -05003516static void subpage_writeb (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00003517 uint32_t value)
3518{
blueswir1db7b5422007-05-26 17:36:03 +00003519 subpage_writelen(opaque, addr, value, 0);
3520}
3521
Anthony Liguoric227f092009-10-01 16:12:16 -05003522static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003523{
blueswir1db7b5422007-05-26 17:36:03 +00003524 return subpage_readlen(opaque, addr, 1);
3525}
3526
Anthony Liguoric227f092009-10-01 16:12:16 -05003527static void subpage_writew (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00003528 uint32_t value)
3529{
blueswir1db7b5422007-05-26 17:36:03 +00003530 subpage_writelen(opaque, addr, value, 1);
3531}
3532
Anthony Liguoric227f092009-10-01 16:12:16 -05003533static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003534{
blueswir1db7b5422007-05-26 17:36:03 +00003535 return subpage_readlen(opaque, addr, 2);
3536}
3537
Richard Hendersonf6405242010-04-22 16:47:31 -07003538static void subpage_writel (void *opaque, target_phys_addr_t addr,
3539 uint32_t value)
blueswir1db7b5422007-05-26 17:36:03 +00003540{
blueswir1db7b5422007-05-26 17:36:03 +00003541 subpage_writelen(opaque, addr, value, 2);
3542}
3543
Blue Swirld60efc62009-08-25 18:29:31 +00003544static CPUReadMemoryFunc * const subpage_read[] = {
blueswir1db7b5422007-05-26 17:36:03 +00003545 &subpage_readb,
3546 &subpage_readw,
3547 &subpage_readl,
3548};
3549
Blue Swirld60efc62009-08-25 18:29:31 +00003550static CPUWriteMemoryFunc * const subpage_write[] = {
blueswir1db7b5422007-05-26 17:36:03 +00003551 &subpage_writeb,
3552 &subpage_writew,
3553 &subpage_writel,
3554};
3555
Anthony Liguoric227f092009-10-01 16:12:16 -05003556static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3557 ram_addr_t memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003558{
3559 int idx, eidx;
3560
3561 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3562 return -1;
3563 idx = SUBPAGE_IDX(start);
3564 eidx = SUBPAGE_IDX(end);
3565#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003566 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003567 mmio, start, end, idx, eidx, memory);
3568#endif
Gleb Natapov95c318f2010-07-29 10:41:45 +03003569 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
3570 memory = IO_MEM_UNASSIGNED;
Richard Hendersonf6405242010-04-22 16:47:31 -07003571 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
blueswir1db7b5422007-05-26 17:36:03 +00003572 for (; idx <= eidx; idx++) {
Richard Hendersonf6405242010-04-22 16:47:31 -07003573 mmio->sub_io_index[idx] = memory;
3574 mmio->region_offset[idx] = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00003575 }
3576
3577 return 0;
3578}
3579
Richard Hendersonf6405242010-04-22 16:47:31 -07003580static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3581 ram_addr_t orig_memory,
3582 ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003583{
Anthony Liguoric227f092009-10-01 16:12:16 -05003584 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003585 int subpage_memory;
3586
Anthony Liguoric227f092009-10-01 16:12:16 -05003587 mmio = qemu_mallocz(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003588
3589 mmio->base = base;
Alexander Graf2507c122010-12-08 12:05:37 +01003590 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3591 DEVICE_NATIVE_ENDIAN);
blueswir1db7b5422007-05-26 17:36:03 +00003592#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003593 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3594 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
blueswir1db7b5422007-05-26 17:36:03 +00003595#endif
aliguori1eec6142009-02-05 22:06:18 +00003596 *phys = subpage_memory | IO_MEM_SUBPAGE;
Richard Hendersonf6405242010-04-22 16:47:31 -07003597 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00003598
3599 return mmio;
3600}
3601
aliguori88715652009-02-11 15:20:58 +00003602static int get_free_io_mem_idx(void)
3603{
3604 int i;
3605
3606 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3607 if (!io_mem_used[i]) {
3608 io_mem_used[i] = 1;
3609 return i;
3610 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003611 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003612 return -1;
3613}
3614
Alexander Grafdd310532010-12-08 12:05:36 +01003615/*
3616 * Usually, devices operate in little endian mode. There are devices out
3617 * there that operate in big endian too. Each device gets byte swapped
3618 * mmio if plugged onto a CPU that does the other endianness.
3619 *
3620 * CPU Device swap?
3621 *
3622 * little little no
3623 * little big yes
3624 * big little yes
3625 * big big no
3626 */
3627
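/* Example (illustrative): on a target built with TARGET_WORDS_BIGENDIAN,
   registering a DEVICE_LITTLE_ENDIAN device takes the "big CPU / little
   device" row above, so cpu_register_io_memory_fixed() wraps the
   handlers via swapendian_init() and every 16- and 32-bit access is
   byte swapped on the way through. */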
3628typedef struct SwapEndianContainer {
3629 CPUReadMemoryFunc *read[3];
3630 CPUWriteMemoryFunc *write[3];
3631 void *opaque;
3632} SwapEndianContainer;
3633
3634static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3635{
3636 uint32_t val;
3637 SwapEndianContainer *c = opaque;
3638 val = c->read[0](c->opaque, addr);
3639 return val;
3640}
3641
3642static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3643{
3644 uint32_t val;
3645 SwapEndianContainer *c = opaque;
3646 val = bswap16(c->read[1](c->opaque, addr));
3647 return val;
3648}
3649
3650static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3651{
3652 uint32_t val;
3653 SwapEndianContainer *c = opaque;
3654 val = bswap32(c->read[2](c->opaque, addr));
3655 return val;
3656}
3657
3658static CPUReadMemoryFunc * const swapendian_readfn[3]={
3659 swapendian_mem_readb,
3660 swapendian_mem_readw,
3661 swapendian_mem_readl
3662};
3663
3664static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3665 uint32_t val)
3666{
3667 SwapEndianContainer *c = opaque;
3668 c->write[0](c->opaque, addr, val);
3669}
3670
3671static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3672 uint32_t val)
3673{
3674 SwapEndianContainer *c = opaque;
3675 c->write[1](c->opaque, addr, bswap16(val));
3676}
3677
3678static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3679 uint32_t val)
3680{
3681 SwapEndianContainer *c = opaque;
3682 c->write[2](c->opaque, addr, bswap32(val));
3683}
3684
3685static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3686 swapendian_mem_writeb,
3687 swapendian_mem_writew,
3688 swapendian_mem_writel
3689};
3690
3691static void swapendian_init(int io_index)
3692{
3693 SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
3694 int i;
3695
3696 /* Swap mmio for big endian targets */
3697 c->opaque = io_mem_opaque[io_index];
3698 for (i = 0; i < 3; i++) {
3699 c->read[i] = io_mem_read[io_index][i];
3700 c->write[i] = io_mem_write[io_index][i];
3701
3702 io_mem_read[io_index][i] = swapendian_readfn[i];
3703 io_mem_write[io_index][i] = swapendian_writefn[i];
3704 }
3705 io_mem_opaque[io_index] = c;
3706}
3707
3708static void swapendian_del(int io_index)
3709{
3710 if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
3711 qemu_free(io_mem_opaque[io_index]);
3712 }
3713}
3714
bellard33417e72003-08-10 21:47:01 +00003715/* mem_read and mem_write are arrays of functions containing the
3716 function to access byte (index 0), word (index 1) and dword (index
Paul Brook0b4e6e32009-04-30 18:37:55 +01003717 2). Functions can be omitted with a NULL function pointer.
blueswir13ee89922008-01-02 19:45:26 +00003718 If io_index is non-zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00003719 modified. If it is zero, a new io zone is allocated. The return
 3720 value can be used with cpu_register_physical_memory(). (-1) is
 3721 returned on error. */
Avi Kivity1eed09c2009-06-14 11:38:51 +03003722static int cpu_register_io_memory_fixed(int io_index,
Blue Swirld60efc62009-08-25 18:29:31 +00003723 CPUReadMemoryFunc * const *mem_read,
3724 CPUWriteMemoryFunc * const *mem_write,
Alexander Grafdd310532010-12-08 12:05:36 +01003725 void *opaque, enum device_endian endian)
bellard33417e72003-08-10 21:47:01 +00003726{
Richard Henderson3cab7212010-05-07 09:52:51 -07003727 int i;
3728
bellard33417e72003-08-10 21:47:01 +00003729 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003730 io_index = get_free_io_mem_idx();
3731 if (io_index == -1)
3732 return io_index;
bellard33417e72003-08-10 21:47:01 +00003733 } else {
Avi Kivity1eed09c2009-06-14 11:38:51 +03003734 io_index >>= IO_MEM_SHIFT;
bellard33417e72003-08-10 21:47:01 +00003735 if (io_index >= IO_MEM_NB_ENTRIES)
3736 return -1;
3737 }
bellardb5ff1b32005-11-26 10:38:39 +00003738
Richard Henderson3cab7212010-05-07 09:52:51 -07003739 for (i = 0; i < 3; ++i) {
3740 io_mem_read[io_index][i]
3741 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3742 }
3743 for (i = 0; i < 3; ++i) {
3744 io_mem_write[io_index][i]
3745 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3746 }
bellarda4193c82004-06-03 14:01:43 +00003747 io_mem_opaque[io_index] = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003748
Alexander Grafdd310532010-12-08 12:05:36 +01003749 switch (endian) {
3750 case DEVICE_BIG_ENDIAN:
3751#ifndef TARGET_WORDS_BIGENDIAN
3752 swapendian_init(io_index);
3753#endif
3754 break;
3755 case DEVICE_LITTLE_ENDIAN:
3756#ifdef TARGET_WORDS_BIGENDIAN
3757 swapendian_init(io_index);
3758#endif
3759 break;
3760 case DEVICE_NATIVE_ENDIAN:
3761 default:
3762 break;
3763 }
3764
Richard Hendersonf6405242010-04-22 16:47:31 -07003765 return (io_index << IO_MEM_SHIFT);
bellard33417e72003-08-10 21:47:01 +00003766}
bellard61382a52003-10-27 21:22:23 +00003767
Blue Swirld60efc62009-08-25 18:29:31 +00003768int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3769 CPUWriteMemoryFunc * const *mem_write,
Alexander Grafdd310532010-12-08 12:05:36 +01003770 void *opaque, enum device_endian endian)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003771{
Alexander Graf2507c122010-12-08 12:05:37 +01003772 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003773}
3774
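/* Illustrative sketch (the mydev_* handlers, opaque pointer s and sizes
   are assumptions; cpu_register_physical_memory() is the wrapper
   declared outside this file): a device registers its MMIO like

       static CPUReadMemoryFunc * const mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc * const mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(mydev_read, mydev_write, s,
                                       DEVICE_NATIVE_ENDIAN);
       cpu_register_physical_memory(base, 0x1000, io);
*/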
aliguori88715652009-02-11 15:20:58 +00003775void cpu_unregister_io_memory(int io_table_address)
3776{
3777 int i;
3778 int io_index = io_table_address >> IO_MEM_SHIFT;
3779
Alexander Grafdd310532010-12-08 12:05:36 +01003780 swapendian_del(io_index);
3781
aliguori88715652009-02-11 15:20:58 +00003782 for (i=0;i < 3; i++) {
3783 io_mem_read[io_index][i] = unassigned_mem_read[i];
3784 io_mem_write[io_index][i] = unassigned_mem_write[i];
3785 }
3786 io_mem_opaque[io_index] = NULL;
3787 io_mem_used[io_index] = 0;
3788}
3789
Avi Kivitye9179ce2009-06-14 11:38:52 +03003790static void io_mem_init(void)
3791{
3792 int i;
3793
Alexander Graf2507c122010-12-08 12:05:37 +01003794 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3795 unassigned_mem_write, NULL,
3796 DEVICE_NATIVE_ENDIAN);
3797 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3798 unassigned_mem_write, NULL,
3799 DEVICE_NATIVE_ENDIAN);
3800 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3801 notdirty_mem_write, NULL,
3802 DEVICE_NATIVE_ENDIAN);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003803 for (i=0; i<5; i++)
3804 io_mem_used[i] = 1;
3805
3806 io_mem_watch = cpu_register_io_memory(watch_mem_read,
Alexander Graf2507c122010-12-08 12:05:37 +01003807 watch_mem_write, NULL,
3808 DEVICE_NATIVE_ENDIAN);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003809}
3810
#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                ram_addr_t addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

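/* Illustrative sketch, not part of the original file: a typical caller
   pattern for the routine above.  The guest-physical address 0x1000 and
   the payload are assumptions. */
#if 0
static void demo_rw_guest_ram(void)
{
    uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };

    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 1);  /* write */
    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 0);  /* read back */
}
#endif
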
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

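/* Illustrative sketch, not part of the original file: a firmware loader
   would use the ROM variant so the copy also lands in write-protected
   regions.  The base address and blob are assumptions. */
#if 0
static void demo_load_firmware(const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(0xfffc0000, blob, size);
}
#endif
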
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

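/* Illustrative sketch, not part of the original file: a device whose
   cpu_physical_memory_map() attempt returned NULL can queue itself here
   and restart the transfer from the callback once the bounce buffer is
   released. */
#if 0
static void demo_map_retry(void *opaque)
{
    /* re-issue the failed cpu_physical_memory_map() for this device */
}

static void demo_defer_dma(void *dev)
{
    cpu_register_map_client(dev, demo_map_retry);
}
#endif
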
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

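/* Illustrative sketch, not part of the original file: the map/unmap
   pairing a DMA-capable device model would use for zero-copy access.
   dma_addr and dma_len are assumed to come from the device's registers. */
#if 0
static void demo_dma_fill(target_phys_addr_t dma_addr,
                          target_phys_addr_t dma_len)
{
    target_phys_addr_t plen = dma_len;
    void *host = cpu_physical_memory_map(dma_addr, &plen, 1 /* is_write */);

    if (!host) {
        /* resources exhausted; see cpu_register_map_client() above */
        return;
    }
    memset(host, 0, plen);                /* the "device" fills the buffer */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif
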
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

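/* Illustrative sketch, not part of the original file: the three accessors
   above differ only in byte order (the lduw_ and ldq_ families below
   follow the same pattern).  The guest-physical address 0x1000 is an
   assumption. */
#if 0
static void demo_endian_loads(void)
{
    uint32_t native = ldl_phys(0x1000);     /* target byte order */
    uint32_t le     = ldl_le_phys(0x1000);  /* always little-endian */
    uint32_t be     = ldl_be_phys(0x1000);  /* always big-endian */

    /* for the same bytes, le == bswap32(be); on a big-endian target,
       native == be */
    (void)native; (void)le; (void)be;
}
#endif
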
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

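/* Illustrative sketch, not part of the original file: how a target MMU
   helper might use the _notdirty store to set an accessed/dirty flag in a
   guest page-table entry without dirtying the page itself.  pte_addr and
   the 0x20 bit are assumptions. */
#if 0
static void demo_pte_mark_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical ACCESSED bit */);
}
#endif
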
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

/* In deterministic execution mode, instructions doing device I/O
   must be at the end of the TB. */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
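
/* Note, not part of the original file: each inclusion above stamps out
   the code-access ("_cmmu") load helpers for one operand size; SHIFT
   0..3 select 1-, 2-, 4- and 8-byte accesses (e.g. the SHIFT 2 pass is
   believed to generate __ldl_cmmu).  Defining env as cpu_single_env
   lets the template compile outside a per-CPU helper context; it is
   undefined again below. */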

#undef env

#endif