blob: 09928a3b3781f93009a7730a8d21265126e39086 [file] [log] [blame]
/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
bellard67b915a2004-03-31 23:37:16 +000019#include "config.h"
bellardd5a8f072004-09-29 21:15:28 +000020#ifdef _WIN32
21#include <windows.h>
22#else
bellarda98d49b2004-11-14 16:22:05 +000023#include <sys/types.h>
bellardd5a8f072004-09-29 21:15:28 +000024#include <sys/mman.h>
25#endif
bellard54936002003-05-13 00:25:15 +000026
Stefan Weil055403b2010-10-22 23:03:32 +020027#include "qemu-common.h"
bellard6180a182003-09-30 21:04:53 +000028#include "cpu.h"
29#include "exec-all.h"
bellardb67d9a52008-05-23 09:57:34 +000030#include "tcg.h"
pbrookb3c77242008-06-30 16:31:04 +000031#include "hw/hw.h"
Alex Williamsoncc9e98c2010-06-25 11:09:43 -060032#include "hw/qdev.h"
aliguori74576192008-10-06 14:02:03 +000033#include "osdep.h"
aliguori7ba1e612008-11-05 16:04:33 +000034#include "kvm.h"
Jun Nakajima432d2682010-08-31 16:41:25 +010035#include "hw/xen.h"
Blue Swirl29e922b2010-03-29 19:24:00 +000036#include "qemu-timer.h"
pbrook53a59602006-03-25 19:31:22 +000037#if defined(CONFIG_USER_ONLY)
38#include <qemu.h>
Juergen Lockf01576f2010-03-25 22:32:16 +010039#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
40#include <sys/param.h>
41#if __FreeBSD_version >= 700104
42#define HAVE_KINFO_GETVMMAP
43#define sigqueue sigqueue_freebsd /* avoid redefinition */
44#include <sys/time.h>
45#include <sys/proc.h>
46#include <machine/profile.h>
47#define _KERNEL
48#include <sys/user.h>
49#undef _KERNEL
50#undef sigqueue
51#include <libutil.h>
52#endif
53#endif
Jun Nakajima432d2682010-08-31 16:41:25 +010054#else /* !CONFIG_USER_ONLY */
55#include "xen-mapcache.h"
pbrook53a59602006-03-25 19:31:22 +000056#endif
bellard54936002003-05-13 00:25:15 +000057
bellardfd6ce8f2003-05-14 19:00:11 +000058//#define DEBUG_TB_INVALIDATE
bellard66e85a22003-06-24 13:28:12 +000059//#define DEBUG_FLUSH
bellard9fa3e852004-01-04 18:06:42 +000060//#define DEBUG_TLB
pbrook67d3b952006-12-18 05:03:52 +000061//#define DEBUG_UNASSIGNED
bellardfd6ce8f2003-05-14 19:00:11 +000062
63/* make various TB consistency checks */
ths5fafdf22007-09-16 21:08:06 +000064//#define DEBUG_TB_CHECK
65//#define DEBUG_TLB_CHECK
bellardfd6ce8f2003-05-14 19:00:11 +000066
ths1196be32007-03-17 15:17:58 +000067//#define DEBUG_IOPORT
blueswir1db7b5422007-05-26 17:36:03 +000068//#define DEBUG_SUBPAGE
ths1196be32007-03-17 15:17:58 +000069
pbrook99773bd2006-04-16 15:14:59 +000070#if !defined(CONFIG_USER_ONLY)
71/* TB consistency checks only implemented for usermode emulation. */
72#undef DEBUG_TB_CHECK
73#endif
74
bellard9fa3e852004-01-04 18:06:42 +000075#define SMC_BITMAP_USE_THRESHOLD 10
76
blueswir1bdaf78e2008-10-04 07:24:27 +000077static TranslationBlock *tbs;
Stefan Weil24ab68a2010-07-19 18:23:17 +020078static int code_gen_max_blocks;
bellard9fa3e852004-01-04 18:06:42 +000079TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
blueswir1bdaf78e2008-10-04 07:24:27 +000080static int nb_tbs;
bellardeb51d102003-05-14 21:51:13 +000081/* any access to the tbs or the page table must use this lock */
Anthony Liguoric227f092009-10-01 16:12:16 -050082spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
bellardfd6ce8f2003-05-14 19:00:11 +000083
blueswir1141ac462008-07-26 15:05:57 +000084#if defined(__arm__) || defined(__sparc_v9__)
85/* The prologue must be reachable with a direct jump. ARM and Sparc64
86 have limited branch ranges (possibly also PPC) so place it in a
blueswir1d03d8602008-07-10 17:21:31 +000087 section close to code segment. */
88#define code_gen_section \
89 __attribute__((__section__(".gen_code"))) \
90 __attribute__((aligned (32)))
Stefan Weilf8e2af12009-06-18 23:04:48 +020091#elif defined(_WIN32)
92/* Maximum alignment for Win32 is 16. */
93#define code_gen_section \
94 __attribute__((aligned (16)))
blueswir1d03d8602008-07-10 17:21:31 +000095#else
96#define code_gen_section \
97 __attribute__((aligned (32)))
98#endif
99
100uint8_t code_gen_prologue[1024] code_gen_section;
blueswir1bdaf78e2008-10-04 07:24:27 +0000101static uint8_t *code_gen_buffer;
102static unsigned long code_gen_buffer_size;
bellard26a5f132008-05-28 12:30:31 +0000103/* threshold to flush the translated code buffer */
blueswir1bdaf78e2008-10-04 07:24:27 +0000104static unsigned long code_gen_buffer_max_size;
Stefan Weil24ab68a2010-07-19 18:23:17 +0200105static uint8_t *code_gen_ptr;
bellardfd6ce8f2003-05-14 19:00:11 +0000106
pbrooke2eef172008-06-08 01:09:01 +0000107#if !defined(CONFIG_USER_ONLY)
bellard9fa3e852004-01-04 18:06:42 +0000108int phys_ram_fd;
aliguori74576192008-10-06 14:02:03 +0000109static int in_migration;
pbrook94a6b542009-04-11 17:15:54 +0000110
Alex Williamsonf471a172010-06-11 11:11:42 -0600111RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
pbrooke2eef172008-06-08 01:09:01 +0000112#endif
bellard9fa3e852004-01-04 18:06:42 +0000113
bellard6a00d602005-11-21 23:25:50 +0000114CPUState *first_cpu;
115/* current CPU in the current thread. It is only valid inside
116 cpu_exec() */
ths5fafdf22007-09-16 21:08:06 +0000117CPUState *cpu_single_env;
pbrook2e70f6e2008-06-29 01:03:05 +0000118/* 0 = Do not count executed instructions.
thsbf20dc02008-06-30 17:22:19 +0000119 1 = Precise instruction counting.
pbrook2e70f6e2008-06-29 01:03:05 +0000120 2 = Adaptive rate instruction counting. */
121int use_icount = 0;
122/* Current instruction counter. While executing translated code this may
123 include some instructions that have not yet been executed. */
124int64_t qemu_icount;
bellard6a00d602005-11-21 23:25:50 +0000125
bellard54936002003-05-13 00:25:15 +0000126typedef struct PageDesc {
bellard92e873b2004-05-21 14:52:29 +0000127 /* list of TBs intersecting this ram page */
bellardfd6ce8f2003-05-14 19:00:11 +0000128 TranslationBlock *first_tb;
bellard9fa3e852004-01-04 18:06:42 +0000129 /* in order to optimize self modifying code, we count the number
130 of lookups we do to a given page to use a bitmap */
131 unsigned int code_write_count;
132 uint8_t *code_bitmap;
133#if defined(CONFIG_USER_ONLY)
134 unsigned long flags;
135#endif
bellard54936002003-05-13 00:25:15 +0000136} PageDesc;
137
Paul Brook41c1b1c2010-03-12 16:54:58 +0000138/* In system mode we want L1_MAP to be based on ram offsets,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800139 while in user mode we want it to be based on virtual addresses. */
140#if !defined(CONFIG_USER_ONLY)
Paul Brook41c1b1c2010-03-12 16:54:58 +0000141#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
142# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
143#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800144# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
Paul Brook41c1b1c2010-03-12 16:54:58 +0000145#endif
j_mayerbedb69e2007-04-05 20:08:21 +0000146#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800147# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
j_mayerbedb69e2007-04-05 20:08:21 +0000148#endif
bellard54936002003-05-13 00:25:15 +0000149
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800150/* Size of the L2 (and L3, etc) page tables. */
151#define L2_BITS 10
bellard54936002003-05-13 00:25:15 +0000152#define L2_SIZE (1 << L2_BITS)
153
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800154/* The bits remaining after N lower levels of page tables. */
155#define P_L1_BITS_REM \
156 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
157#define V_L1_BITS_REM \
158 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
159
160/* Size of the L1 page table. Avoid silly small sizes. */
161#if P_L1_BITS_REM < 4
162#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
163#else
164#define P_L1_BITS P_L1_BITS_REM
165#endif
166
167#if V_L1_BITS_REM < 4
168#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
169#else
170#define V_L1_BITS V_L1_BITS_REM
171#endif
172
173#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
174#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
175
176#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
177#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
178
bellard83fb7ad2004-07-05 21:25:26 +0000179unsigned long qemu_real_host_page_size;
180unsigned long qemu_host_page_bits;
181unsigned long qemu_host_page_size;
182unsigned long qemu_host_page_mask;
bellard54936002003-05-13 00:25:15 +0000183
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800184/* This is a multi-level map on the virtual address space.
185 The bottom level has pointers to PageDesc. */
186static void *l1_map[V_L1_SIZE];
bellard54936002003-05-13 00:25:15 +0000187
pbrooke2eef172008-06-08 01:09:01 +0000188#if !defined(CONFIG_USER_ONLY)
Paul Brook41c1b1c2010-03-12 16:54:58 +0000189typedef struct PhysPageDesc {
190 /* offset in host memory of the page + io_index in the low bits */
191 ram_addr_t phys_offset;
192 ram_addr_t region_offset;
193} PhysPageDesc;
194
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800195/* This is a multi-level map on the physical address space.
196 The bottom level has pointers to PhysPageDesc. */
197static void *l1_phys_map[P_L1_SIZE];
Paul Brook6d9a1302010-02-28 23:55:53 +0000198
pbrooke2eef172008-06-08 01:09:01 +0000199static void io_mem_init(void);
200
bellard33417e72003-08-10 21:47:01 +0000201/* io memory support */
bellard33417e72003-08-10 21:47:01 +0000202CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
203CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
bellarda4193c82004-06-03 14:01:43 +0000204void *io_mem_opaque[IO_MEM_NB_ENTRIES];
blueswir1511d2b12009-03-07 15:32:56 +0000205static char io_mem_used[IO_MEM_NB_ENTRIES];
pbrook6658ffb2007-03-16 23:58:11 +0000206static int io_mem_watch;
207#endif
bellard33417e72003-08-10 21:47:01 +0000208
bellard34865132003-10-05 14:28:56 +0000209/* log support */
Juha Riihimäki1e8b27c2009-12-03 15:56:02 +0200210#ifdef WIN32
211static const char *logfilename = "qemu.log";
212#else
blueswir1d9b630f2008-10-05 09:57:08 +0000213static const char *logfilename = "/tmp/qemu.log";
Juha Riihimäki1e8b27c2009-12-03 15:56:02 +0200214#endif
bellard34865132003-10-05 14:28:56 +0000215FILE *logfile;
216int loglevel;
pbrooke735b912007-06-30 13:53:24 +0000217static int log_append = 0;
bellard34865132003-10-05 14:28:56 +0000218
bellarde3db7222005-01-26 22:00:47 +0000219/* statistics */
Paul Brookb3755a92010-03-12 16:54:58 +0000220#if !defined(CONFIG_USER_ONLY)
bellarde3db7222005-01-26 22:00:47 +0000221static int tlb_flush_count;
Paul Brookb3755a92010-03-12 16:54:58 +0000222#endif
bellarde3db7222005-01-26 22:00:47 +0000223static int tb_flush_count;
224static int tb_phys_invalidate_count;
225
bellard7cb69ca2008-05-10 10:55:51 +0000226#ifdef _WIN32
227static void map_exec(void *addr, long size)
228{
229 DWORD old_protect;
230 VirtualProtect(addr, size,
231 PAGE_EXECUTE_READWRITE, &old_protect);
232
233}
234#else
/* Make an address range executable (POSIX variant): round the range out
   to whole host pages and mprotect it read/write/execute. */
static void map_exec(void *addr, long size)
{
    unsigned long page_size = getpagesize();
    unsigned long page_mask = ~(page_size - 1);
    unsigned long first = (unsigned long)addr & page_mask;
    unsigned long last =
        ((unsigned long)addr + size + page_size - 1) & page_mask;

    /* NOTE(review): the mprotect() return value is deliberately ignored,
       preserving the original best-effort behaviour. */
    mprotect((void *)first, last - first,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
250#endif
251
bellardb346ff42003-06-15 20:05:50 +0000252static void page_init(void)
bellard54936002003-05-13 00:25:15 +0000253{
bellard83fb7ad2004-07-05 21:25:26 +0000254 /* NOTE: we can always suppose that qemu_host_page_size >=
bellard54936002003-05-13 00:25:15 +0000255 TARGET_PAGE_SIZE */
aliguoric2b48b62008-11-11 22:06:42 +0000256#ifdef _WIN32
257 {
258 SYSTEM_INFO system_info;
259
260 GetSystemInfo(&system_info);
261 qemu_real_host_page_size = system_info.dwPageSize;
262 }
263#else
264 qemu_real_host_page_size = getpagesize();
265#endif
bellard83fb7ad2004-07-05 21:25:26 +0000266 if (qemu_host_page_size == 0)
267 qemu_host_page_size = qemu_real_host_page_size;
268 if (qemu_host_page_size < TARGET_PAGE_SIZE)
269 qemu_host_page_size = TARGET_PAGE_SIZE;
270 qemu_host_page_bits = 0;
271 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
272 qemu_host_page_bits++;
273 qemu_host_page_mask = ~(qemu_host_page_size - 1);
balrog50a95692007-12-12 01:16:23 +0000274
Paul Brook2e9a5712010-05-05 16:32:59 +0100275#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
balrog50a95692007-12-12 01:16:23 +0000276 {
Juergen Lockf01576f2010-03-25 22:32:16 +0100277#ifdef HAVE_KINFO_GETVMMAP
278 struct kinfo_vmentry *freep;
279 int i, cnt;
280
281 freep = kinfo_getvmmap(getpid(), &cnt);
282 if (freep) {
283 mmap_lock();
284 for (i = 0; i < cnt; i++) {
285 unsigned long startaddr, endaddr;
286
287 startaddr = freep[i].kve_start;
288 endaddr = freep[i].kve_end;
289 if (h2g_valid(startaddr)) {
290 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
291
292 if (h2g_valid(endaddr)) {
293 endaddr = h2g(endaddr);
Aurelien Jarnofd436902010-04-10 17:20:36 +0200294 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
Juergen Lockf01576f2010-03-25 22:32:16 +0100295 } else {
296#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
297 endaddr = ~0ul;
Aurelien Jarnofd436902010-04-10 17:20:36 +0200298 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
Juergen Lockf01576f2010-03-25 22:32:16 +0100299#endif
300 }
301 }
302 }
303 free(freep);
304 mmap_unlock();
305 }
306#else
balrog50a95692007-12-12 01:16:23 +0000307 FILE *f;
balrog50a95692007-12-12 01:16:23 +0000308
pbrook07765902008-05-31 16:33:53 +0000309 last_brk = (unsigned long)sbrk(0);
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800310
Aurelien Jarnofd436902010-04-10 17:20:36 +0200311 f = fopen("/compat/linux/proc/self/maps", "r");
balrog50a95692007-12-12 01:16:23 +0000312 if (f) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800313 mmap_lock();
314
balrog50a95692007-12-12 01:16:23 +0000315 do {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800316 unsigned long startaddr, endaddr;
317 int n;
318
319 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
320
321 if (n == 2 && h2g_valid(startaddr)) {
322 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
323
324 if (h2g_valid(endaddr)) {
325 endaddr = h2g(endaddr);
326 } else {
327 endaddr = ~0ul;
328 }
329 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
balrog50a95692007-12-12 01:16:23 +0000330 }
331 } while (!feof(f));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800332
balrog50a95692007-12-12 01:16:23 +0000333 fclose(f);
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800334 mmap_unlock();
balrog50a95692007-12-12 01:16:23 +0000335 }
Juergen Lockf01576f2010-03-25 22:32:16 +0100336#endif
balrog50a95692007-12-12 01:16:23 +0000337 }
338#endif
bellard54936002003-05-13 00:25:15 +0000339}
340
Paul Brook41c1b1c2010-03-12 16:54:58 +0000341static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
bellard54936002003-05-13 00:25:15 +0000342{
Paul Brook41c1b1c2010-03-12 16:54:58 +0000343 PageDesc *pd;
344 void **lp;
345 int i;
346
pbrook17e23772008-06-09 13:47:45 +0000347#if defined(CONFIG_USER_ONLY)
Paul Brook2e9a5712010-05-05 16:32:59 +0100348 /* We can't use qemu_malloc because it may recurse into a locked mutex. */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800349# define ALLOC(P, SIZE) \
350 do { \
351 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
352 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800353 } while (0)
pbrook17e23772008-06-09 13:47:45 +0000354#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800355# define ALLOC(P, SIZE) \
356 do { P = qemu_mallocz(SIZE); } while (0)
pbrook17e23772008-06-09 13:47:45 +0000357#endif
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800358
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800359 /* Level 1. Always allocated. */
360 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
361
362 /* Level 2..N-1. */
363 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
364 void **p = *lp;
365
366 if (p == NULL) {
367 if (!alloc) {
368 return NULL;
369 }
370 ALLOC(p, sizeof(void *) * L2_SIZE);
371 *lp = p;
372 }
373
374 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
bellard54936002003-05-13 00:25:15 +0000375 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800376
377 pd = *lp;
378 if (pd == NULL) {
379 if (!alloc) {
380 return NULL;
381 }
382 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
383 *lp = pd;
384 }
385
386#undef ALLOC
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800387
388 return pd + (index & (L2_SIZE - 1));
bellard54936002003-05-13 00:25:15 +0000389}
390
Paul Brook41c1b1c2010-03-12 16:54:58 +0000391static inline PageDesc *page_find(tb_page_addr_t index)
bellard54936002003-05-13 00:25:15 +0000392{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800393 return page_find_alloc(index, 0);
bellard54936002003-05-13 00:25:15 +0000394}
395
Paul Brook6d9a1302010-02-28 23:55:53 +0000396#if !defined(CONFIG_USER_ONLY)
Anthony Liguoric227f092009-10-01 16:12:16 -0500397static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
bellard92e873b2004-05-21 14:52:29 +0000398{
pbrooke3f4e2a2006-04-08 20:02:06 +0000399 PhysPageDesc *pd;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800400 void **lp;
401 int i;
bellard92e873b2004-05-21 14:52:29 +0000402
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800403 /* Level 1. Always allocated. */
404 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
bellard108c49b2005-07-24 12:55:09 +0000405
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800406 /* Level 2..N-1. */
407 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
408 void **p = *lp;
409 if (p == NULL) {
410 if (!alloc) {
411 return NULL;
412 }
413 *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
414 }
415 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
bellard108c49b2005-07-24 12:55:09 +0000416 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800417
pbrooke3f4e2a2006-04-08 20:02:06 +0000418 pd = *lp;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800419 if (pd == NULL) {
pbrooke3f4e2a2006-04-08 20:02:06 +0000420 int i;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800421
422 if (!alloc) {
bellard108c49b2005-07-24 12:55:09 +0000423 return NULL;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800424 }
425
426 *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
427
pbrook67c4d232009-02-23 13:16:07 +0000428 for (i = 0; i < L2_SIZE; i++) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800429 pd[i].phys_offset = IO_MEM_UNASSIGNED;
430 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
pbrook67c4d232009-02-23 13:16:07 +0000431 }
bellard92e873b2004-05-21 14:52:29 +0000432 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800433
434 return pd + (index & (L2_SIZE - 1));
bellard92e873b2004-05-21 14:52:29 +0000435}
436
Anthony Liguoric227f092009-10-01 16:12:16 -0500437static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
bellard92e873b2004-05-21 14:52:29 +0000438{
bellard108c49b2005-07-24 12:55:09 +0000439 return phys_page_find_alloc(index, 0);
bellard92e873b2004-05-21 14:52:29 +0000440}
441
Anthony Liguoric227f092009-10-01 16:12:16 -0500442static void tlb_protect_code(ram_addr_t ram_addr);
443static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +0000444 target_ulong vaddr);
pbrookc8a706f2008-06-02 16:16:42 +0000445#define mmap_lock() do { } while(0)
446#define mmap_unlock() do { } while(0)
bellard9fa3e852004-01-04 18:06:42 +0000447#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000448
bellard43694152008-05-29 09:35:57 +0000449#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
450
451#if defined(CONFIG_USER_ONLY)
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100452/* Currently it is not recommended to allocate big chunks of data in
bellard43694152008-05-29 09:35:57 +0000453 user mode. It will change when a dedicated libc will be used */
454#define USE_STATIC_CODE_GEN_BUFFER
455#endif
456
457#ifdef USE_STATIC_CODE_GEN_BUFFER
Aurelien Jarnoebf50fb2010-03-29 02:12:51 +0200458static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
459 __attribute__((aligned (CODE_GEN_ALIGN)));
bellard43694152008-05-29 09:35:57 +0000460#endif
461
blueswir18fcd3692008-08-17 20:26:25 +0000462static void code_gen_alloc(unsigned long tb_size)
bellard26a5f132008-05-28 12:30:31 +0000463{
bellard43694152008-05-29 09:35:57 +0000464#ifdef USE_STATIC_CODE_GEN_BUFFER
465 code_gen_buffer = static_code_gen_buffer;
466 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
467 map_exec(code_gen_buffer, code_gen_buffer_size);
468#else
bellard26a5f132008-05-28 12:30:31 +0000469 code_gen_buffer_size = tb_size;
470 if (code_gen_buffer_size == 0) {
bellard43694152008-05-29 09:35:57 +0000471#if defined(CONFIG_USER_ONLY)
472 /* in user mode, phys_ram_size is not meaningful */
473 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
474#else
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100475 /* XXX: needs adjustments */
pbrook94a6b542009-04-11 17:15:54 +0000476 code_gen_buffer_size = (unsigned long)(ram_size / 4);
bellard43694152008-05-29 09:35:57 +0000477#endif
bellard26a5f132008-05-28 12:30:31 +0000478 }
479 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
480 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
481 /* The code gen buffer location may have constraints depending on
482 the host cpu and OS */
483#if defined(__linux__)
484 {
485 int flags;
blueswir1141ac462008-07-26 15:05:57 +0000486 void *start = NULL;
487
bellard26a5f132008-05-28 12:30:31 +0000488 flags = MAP_PRIVATE | MAP_ANONYMOUS;
489#if defined(__x86_64__)
490 flags |= MAP_32BIT;
491 /* Cannot map more than that */
492 if (code_gen_buffer_size > (800 * 1024 * 1024))
493 code_gen_buffer_size = (800 * 1024 * 1024);
blueswir1141ac462008-07-26 15:05:57 +0000494#elif defined(__sparc_v9__)
495 // Map the buffer below 2G, so we can use direct calls and branches
496 flags |= MAP_FIXED;
497 start = (void *) 0x60000000UL;
498 if (code_gen_buffer_size > (512 * 1024 * 1024))
499 code_gen_buffer_size = (512 * 1024 * 1024);
balrog1cb06612008-12-01 02:10:17 +0000500#elif defined(__arm__)
balrog63d41242008-12-01 02:19:41 +0000501 /* Map the buffer below 32M, so we can use direct calls and branches */
balrog1cb06612008-12-01 02:10:17 +0000502 flags |= MAP_FIXED;
503 start = (void *) 0x01000000UL;
504 if (code_gen_buffer_size > 16 * 1024 * 1024)
505 code_gen_buffer_size = 16 * 1024 * 1024;
Richard Hendersoneba0b892010-06-04 12:14:14 -0700506#elif defined(__s390x__)
507 /* Map the buffer so that we can use direct calls and branches. */
508 /* We have a +- 4GB range on the branches; leave some slop. */
509 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
510 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
511 }
512 start = (void *)0x90000000UL;
bellard26a5f132008-05-28 12:30:31 +0000513#endif
blueswir1141ac462008-07-26 15:05:57 +0000514 code_gen_buffer = mmap(start, code_gen_buffer_size,
515 PROT_WRITE | PROT_READ | PROT_EXEC,
bellard26a5f132008-05-28 12:30:31 +0000516 flags, -1, 0);
517 if (code_gen_buffer == MAP_FAILED) {
518 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
519 exit(1);
520 }
521 }
Bradcbb608a2010-12-20 21:25:40 -0500522#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
523 || defined(__DragonFly__) || defined(__OpenBSD__)
aliguori06e67a82008-09-27 15:32:41 +0000524 {
525 int flags;
526 void *addr = NULL;
527 flags = MAP_PRIVATE | MAP_ANONYMOUS;
528#if defined(__x86_64__)
529 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
530 * 0x40000000 is free */
531 flags |= MAP_FIXED;
532 addr = (void *)0x40000000;
533 /* Cannot map more than that */
534 if (code_gen_buffer_size > (800 * 1024 * 1024))
535 code_gen_buffer_size = (800 * 1024 * 1024);
Blue Swirl4cd31ad2011-01-16 08:32:27 +0000536#elif defined(__sparc_v9__)
537 // Map the buffer below 2G, so we can use direct calls and branches
538 flags |= MAP_FIXED;
539 addr = (void *) 0x60000000UL;
540 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
541 code_gen_buffer_size = (512 * 1024 * 1024);
542 }
aliguori06e67a82008-09-27 15:32:41 +0000543#endif
544 code_gen_buffer = mmap(addr, code_gen_buffer_size,
545 PROT_WRITE | PROT_READ | PROT_EXEC,
546 flags, -1, 0);
547 if (code_gen_buffer == MAP_FAILED) {
548 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
549 exit(1);
550 }
551 }
bellard26a5f132008-05-28 12:30:31 +0000552#else
553 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
bellard26a5f132008-05-28 12:30:31 +0000554 map_exec(code_gen_buffer, code_gen_buffer_size);
555#endif
bellard43694152008-05-29 09:35:57 +0000556#endif /* !USE_STATIC_CODE_GEN_BUFFER */
bellard26a5f132008-05-28 12:30:31 +0000557 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
558 code_gen_buffer_max_size = code_gen_buffer_size -
Aurelien Jarno239fda32010-06-03 19:29:31 +0200559 (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
bellard26a5f132008-05-28 12:30:31 +0000560 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
561 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
562}
563
564/* Must be called before using the QEMU cpus. 'tb_size' is the size
565 (in bytes) allocated to the translation buffer. Zero means default
566 size. */
567void cpu_exec_init_all(unsigned long tb_size)
568{
bellard26a5f132008-05-28 12:30:31 +0000569 cpu_gen_init();
570 code_gen_alloc(tb_size);
571 code_gen_ptr = code_gen_buffer;
bellard43694152008-05-29 09:35:57 +0000572 page_init();
pbrooke2eef172008-06-08 01:09:01 +0000573#if !defined(CONFIG_USER_ONLY)
bellard26a5f132008-05-28 12:30:31 +0000574 io_mem_init();
pbrooke2eef172008-06-08 01:09:01 +0000575#endif
Richard Henderson9002ec72010-05-06 08:50:41 -0700576#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
577 /* There's no guest base to take into account, so go ahead and
578 initialize the prologue now. */
579 tcg_prologue_init(&tcg_ctx);
580#endif
bellard26a5f132008-05-28 12:30:31 +0000581}
582
pbrook9656f322008-07-01 20:01:19 +0000583#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
584
Juan Quintelae59fb372009-09-29 22:48:21 +0200585static int cpu_common_post_load(void *opaque, int version_id)
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200586{
587 CPUState *env = opaque;
588
aurel323098dba2009-03-07 21:28:24 +0000589 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
590 version_id is increased. */
591 env->interrupt_request &= ~0x01;
pbrook9656f322008-07-01 20:01:19 +0000592 tlb_flush(env, 1);
593
594 return 0;
595}
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200596
597static const VMStateDescription vmstate_cpu_common = {
598 .name = "cpu_common",
599 .version_id = 1,
600 .minimum_version_id = 1,
601 .minimum_version_id_old = 1,
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200602 .post_load = cpu_common_post_load,
603 .fields = (VMStateField []) {
604 VMSTATE_UINT32(halted, CPUState),
605 VMSTATE_UINT32(interrupt_request, CPUState),
606 VMSTATE_END_OF_LIST()
607 }
608};
pbrook9656f322008-07-01 20:01:19 +0000609#endif
610
Glauber Costa950f1472009-06-09 12:15:18 -0400611CPUState *qemu_get_cpu(int cpu)
612{
613 CPUState *env = first_cpu;
614
615 while (env) {
616 if (env->cpu_index == cpu)
617 break;
618 env = env->next_cpu;
619 }
620
621 return env;
622}
623
bellard6a00d602005-11-21 23:25:50 +0000624void cpu_exec_init(CPUState *env)
bellardfd6ce8f2003-05-14 19:00:11 +0000625{
bellard6a00d602005-11-21 23:25:50 +0000626 CPUState **penv;
627 int cpu_index;
628
pbrookc2764712009-03-07 15:24:59 +0000629#if defined(CONFIG_USER_ONLY)
630 cpu_list_lock();
631#endif
bellard6a00d602005-11-21 23:25:50 +0000632 env->next_cpu = NULL;
633 penv = &first_cpu;
634 cpu_index = 0;
635 while (*penv != NULL) {
Nathan Froyd1e9fa732009-06-03 11:33:08 -0700636 penv = &(*penv)->next_cpu;
bellard6a00d602005-11-21 23:25:50 +0000637 cpu_index++;
638 }
639 env->cpu_index = cpu_index;
aliguori268a3622009-04-21 22:30:27 +0000640 env->numa_node = 0;
Blue Swirl72cf2d42009-09-12 07:36:22 +0000641 QTAILQ_INIT(&env->breakpoints);
642 QTAILQ_INIT(&env->watchpoints);
Jan Kiszkadc7a09c2011-03-15 12:26:31 +0100643#ifndef CONFIG_USER_ONLY
644 env->thread_id = qemu_get_thread_id();
645#endif
bellard6a00d602005-11-21 23:25:50 +0000646 *penv = env;
pbrookc2764712009-03-07 15:24:59 +0000647#if defined(CONFIG_USER_ONLY)
648 cpu_list_unlock();
649#endif
pbrookb3c77242008-06-30 16:31:04 +0000650#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
Alex Williamson0be71e32010-06-25 11:09:07 -0600651 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
652 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
pbrookb3c77242008-06-30 16:31:04 +0000653 cpu_save, cpu_load, env);
654#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000655}
656
Tristan Gingoldd1a1eb72011-02-10 10:04:57 +0100657/* Allocate a new translation block. Flush the translation buffer if
658 too many translation blocks or too much generated code. */
659static TranslationBlock *tb_alloc(target_ulong pc)
660{
661 TranslationBlock *tb;
662
663 if (nb_tbs >= code_gen_max_blocks ||
664 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
665 return NULL;
666 tb = &tbs[nb_tbs++];
667 tb->pc = pc;
668 tb->cflags = 0;
669 return tb;
670}
671
672void tb_free(TranslationBlock *tb)
673{
674 /* In practice this is mostly used for single use temporary TB
675 Ignore the hard cases and just back up if this TB happens to
676 be the last one generated. */
677 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
678 code_gen_ptr = tb->tc_ptr;
679 nb_tbs--;
680 }
681}
682
bellard9fa3e852004-01-04 18:06:42 +0000683static inline void invalidate_page_bitmap(PageDesc *p)
684{
685 if (p->code_bitmap) {
bellard59817cc2004-02-16 22:01:13 +0000686 qemu_free(p->code_bitmap);
bellard9fa3e852004-01-04 18:06:42 +0000687 p->code_bitmap = NULL;
688 }
689 p->code_write_count = 0;
690}
691
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800692/* Set to NULL all the 'first_tb' fields in all PageDescs. */
693
694static void page_flush_tb_1 (int level, void **lp)
695{
696 int i;
697
698 if (*lp == NULL) {
699 return;
700 }
701 if (level == 0) {
702 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +0000703 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800704 pd[i].first_tb = NULL;
705 invalidate_page_bitmap(pd + i);
706 }
707 } else {
708 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +0000709 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800710 page_flush_tb_1 (level - 1, pp + i);
711 }
712 }
713}
714
bellardfd6ce8f2003-05-14 19:00:11 +0000715static void page_flush_tb(void)
716{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800717 int i;
718 for (i = 0; i < V_L1_SIZE; i++) {
719 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
bellardfd6ce8f2003-05-14 19:00:11 +0000720 }
721}
722
723/* flush all the translation blocks */
bellardd4e81642003-05-25 16:46:15 +0000724/* XXX: tb_flush is currently not thread safe */
bellard6a00d602005-11-21 23:25:50 +0000725void tb_flush(CPUState *env1)
bellardfd6ce8f2003-05-14 19:00:11 +0000726{
bellard6a00d602005-11-21 23:25:50 +0000727 CPUState *env;
bellard01243112004-01-04 15:48:17 +0000728#if defined(DEBUG_FLUSH)
blueswir1ab3d1722007-11-04 07:31:40 +0000729 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
730 (unsigned long)(code_gen_ptr - code_gen_buffer),
731 nb_tbs, nb_tbs > 0 ?
732 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
bellardfd6ce8f2003-05-14 19:00:11 +0000733#endif
bellard26a5f132008-05-28 12:30:31 +0000734 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
pbrooka208e542008-03-31 17:07:36 +0000735 cpu_abort(env1, "Internal error: code buffer overflow\n");
736
bellardfd6ce8f2003-05-14 19:00:11 +0000737 nb_tbs = 0;
ths3b46e622007-09-17 08:09:54 +0000738
bellard6a00d602005-11-21 23:25:50 +0000739 for(env = first_cpu; env != NULL; env = env->next_cpu) {
740 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
741 }
bellard9fa3e852004-01-04 18:06:42 +0000742
bellard8a8a6082004-10-03 13:36:49 +0000743 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
bellardfd6ce8f2003-05-14 19:00:11 +0000744 page_flush_tb();
bellard9fa3e852004-01-04 18:06:42 +0000745
bellardfd6ce8f2003-05-14 19:00:11 +0000746 code_gen_ptr = code_gen_buffer;
bellardd4e81642003-05-25 16:46:15 +0000747 /* XXX: flush processor icache at this point if cache flush is
748 expensive */
bellarde3db7222005-01-26 22:00:47 +0000749 tb_flush_count++;
bellardfd6ce8f2003-05-14 19:00:11 +0000750}
751
752#ifdef DEBUG_TB_CHECK
753
j_mayerbc98a7e2007-04-04 07:55:12 +0000754static void tb_invalidate_check(target_ulong address)
bellardfd6ce8f2003-05-14 19:00:11 +0000755{
756 TranslationBlock *tb;
757 int i;
758 address &= TARGET_PAGE_MASK;
pbrook99773bd2006-04-16 15:14:59 +0000759 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
760 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
bellardfd6ce8f2003-05-14 19:00:11 +0000761 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
762 address >= tb->pc + tb->size)) {
Blue Swirl0bf9e312009-07-20 17:19:25 +0000763 printf("ERROR invalidate: address=" TARGET_FMT_lx
764 " PC=%08lx size=%04x\n",
pbrook99773bd2006-04-16 15:14:59 +0000765 address, (long)tb->pc, tb->size);
bellardfd6ce8f2003-05-14 19:00:11 +0000766 }
767 }
768 }
769}
770
771/* verify that all the pages have correct rights for code */
772static void tb_page_check(void)
773{
774 TranslationBlock *tb;
775 int i, flags1, flags2;
ths3b46e622007-09-17 08:09:54 +0000776
pbrook99773bd2006-04-16 15:14:59 +0000777 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
778 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
bellardfd6ce8f2003-05-14 19:00:11 +0000779 flags1 = page_get_flags(tb->pc);
780 flags2 = page_get_flags(tb->pc + tb->size - 1);
781 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
782 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
pbrook99773bd2006-04-16 15:14:59 +0000783 (long)tb->pc, tb->size, flags1, flags2);
bellardfd6ce8f2003-05-14 19:00:11 +0000784 }
785 }
786 }
787}
788
789#endif
790
791/* invalidate one TB */
792static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
793 int next_offset)
794{
795 TranslationBlock *tb1;
796 for(;;) {
797 tb1 = *ptb;
798 if (tb1 == tb) {
799 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
800 break;
801 }
802 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
803 }
804}
805
bellard9fa3e852004-01-04 18:06:42 +0000806static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
807{
808 TranslationBlock *tb1;
809 unsigned int n1;
810
811 for(;;) {
812 tb1 = *ptb;
813 n1 = (long)tb1 & 3;
814 tb1 = (TranslationBlock *)((long)tb1 & ~3);
815 if (tb1 == tb) {
816 *ptb = tb1->page_next[n1];
817 break;
818 }
819 ptb = &tb1->page_next[n1];
820 }
821}
822
/* Unlink jump slot 'n' of 'tb' from the circular jump list it is on.
   List pointers carry a 2-bit tag in their low bits: 0/1 select a
   jmp_next[] slot of the pointed-to TB, 2 marks the list head reached
   through jmp_first. */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* at the list head: follow jmp_first to keep circling */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
850
851/* reset the jump entry 'n' of a TB so that it is not chained to
852 another TB */
853static inline void tb_reset_jump(TranslationBlock *tb, int n)
854{
855 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
856}
857
/* Remove 'tb' from every lookup structure: the physical hash table,
   the per-page TB lists (skipping the page equal to 'page_addr', whose
   list the caller is handling itself; pass -1 to unlink from both
   pages), the per-CPU jump caches, and the chained-jump lists. */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB: walk the incoming-jump
       list (low 2 bits of each pointer tag the jump slot; tag 2 marks
       the end) and redirect each jump back to its own code */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
913
/* Set 'len' consecutive bits in bitmap 'tab' starting at bit index
   'start'.  The range [start, start+len) must lie inside the bitmap. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *p = tab + (start >> 3);
    int head_mask = 0xff << (start & 7);

    if ((start & ~7) == (end & ~7)) {
        /* all bits fall within a single byte */
        if (start < end) {
            *p |= head_mask & ~(0xff << (end & 7));
        }
        return;
    }
    /* partial leading byte */
    *p++ |= head_mask;
    start = (start + 8) & ~7;
    /* run of full bytes in the middle */
    while (start < (end & ~7)) {
        *p++ = 0xff;
        start += 8;
    }
    /* partial trailing byte, if any */
    if (start < end) {
        *p |= ~(0xff << (end & 7));
    }
}
940
941static void build_page_bitmap(PageDesc *p)
942{
943 int n, tb_start, tb_end;
944 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +0000945
pbrookb2a70812008-06-09 13:57:23 +0000946 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
bellard9fa3e852004-01-04 18:06:42 +0000947
948 tb = p->first_tb;
949 while (tb != NULL) {
950 n = (long)tb & 3;
951 tb = (TranslationBlock *)((long)tb & ~3);
952 /* NOTE: this is subtle as a TB may span two physical pages */
953 if (n == 0) {
954 /* NOTE: tb_end may be after the end of the page, but
955 it is not a problem */
956 tb_start = tb->pc & ~TARGET_PAGE_MASK;
957 tb_end = tb_start + tb->size;
958 if (tb_end > TARGET_PAGE_SIZE)
959 tb_end = TARGET_PAGE_SIZE;
960 } else {
961 tb_start = 0;
962 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
963 }
964 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
965 tb = tb->page_next[n];
966 }
967}
968
/* Translate guest code at (pc, cs_base, flags) into a new TB and link
   it into the lookup structures.  Flushes everything and retries if
   the TB pool or code buffer is full, so it never returns NULL. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the buffer pointer, keeping host code CODE_GEN_ALIGN aligned */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* the TB spills onto a second guest page: record it too */
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
ths3b46e622007-09-17 08:09:54 +00001006
bellard9fa3e852004-01-04 18:06:42 +00001007/* invalidate all TBs which intersect with the target physical page
1008 starting in range [start;end[. NOTE: start and end must refer to
bellardd720b932004-04-25 17:57:43 +00001009 the same physical page. 'is_cpu_write_access' should be true if called
1010 from a real cpu write access: the virtual CPU will exit the current
1011 TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    /* only look up the currently-executing TB lazily, and only when
       this is a real guest write (mem_io_pc is then meaningful) */
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after enough writes to a code page, invest in a per-byte bitmap
       so later writes can be filtered cheaply */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1116
1117/* len must be <= 8 and start must be a multiple of len */
/* Fast path for small guest writes: consult the page's code bitmap (if
   built) and only fall back to the full range invalidation when the
   written bytes actually overlap translated code.  len must be <= 8
   and start a multiple of len. */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        /* test the 'len' bitmap bits covering the written bytes */
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
        /* no bitmap yet: must assume the write may hit code */
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1143
bellard9fa3e852004-01-04 18:06:42 +00001144#if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the page containing 'addr' (user-mode only:
   called from the SIGSEGV handler after a write to a protected code
   page).  'pc'/'puc' describe the faulting host state so execution can
   be restarted precisely when the current TB modified itself. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* find the TB the faulting host pc belongs to */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
bellard9fa3e852004-01-04 18:06:42 +00001203#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001204
1205/* add the tb in the target page and protect it if necessary */
/* Record that page 'n' (0 or 1) of 'tb' lives at 'page_addr', push the
   TB onto that page's list (tagging the pointer with n), and make sure
   writes to the page will be detected. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    /* remember whether the page previously held any TB at all */
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several guest pages: accumulate their
           flags and strip PAGE_WRITE from each of them */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1258
bellard9fa3e852004-01-04 18:06:42 +00001259/* add a new TB and link it to the physical page tables. phys_page2 is
1260 (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table (push at the head of the bucket) */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* empty incoming-jump list: tag 2 marks the head/end sentinel */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses (0xffff means "no jump slot") */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1298
bellarda513fe12003-05-27 23:29:48 +00001299/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1300 tb[1].tc_ptr. Return NULL if not found */
1301TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1302{
1303 int m_min, m_max, m;
1304 unsigned long v;
1305 TranslationBlock *tb;
1306
1307 if (nb_tbs <= 0)
1308 return NULL;
1309 if (tc_ptr < (unsigned long)code_gen_buffer ||
1310 tc_ptr >= (unsigned long)code_gen_ptr)
1311 return NULL;
1312 /* binary search (cf Knuth) */
1313 m_min = 0;
1314 m_max = nb_tbs - 1;
1315 while (m_min <= m_max) {
1316 m = (m_min + m_max) >> 1;
1317 tb = &tbs[m];
1318 v = (unsigned long)tb->tc_ptr;
1319 if (v == tc_ptr)
1320 return tb;
1321 else if (tc_ptr < v) {
1322 m_max = m - 1;
1323 } else {
1324 m_min = m + 1;
1325 }
ths5fafdf22007-09-16 21:08:06 +00001326 }
bellarda513fe12003-05-27 23:29:48 +00001327 return &tbs[m_max];
1328}
bellard75012672003-06-21 13:11:07 +00001329
bellardea041c02003-06-25 16:16:50 +00001330static void tb_reset_jump_recursive(TranslationBlock *tb);
1331
/* If jump slot 'n' of 'tb' is chained to another TB, unchain it:
   remove tb from the destination's incoming-jump list, reset the
   generated jump, and recursively unchain the destination TB. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list (pointer tag 2 marks the owning TB) */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1370
/* Break both outgoing chained jumps of 'tb' (and, transitively, those
   of every TB reachable through them). */
static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
1376
bellard1fddef42005-04-17 19:16:13 +00001377#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001378#if defined(CONFIG_USER_ONLY)
/* user-mode variant: pc is directly usable as a page address, so just
   invalidate any TB covering the breakpoint byte */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
1383#else
/* softmmu variant: translate the breakpoint's virtual pc to a ram
   address before invalidating the TBs that cover it */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    /* combine the page's ram offset with the in-page offset of pc */
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
bellardc27004e2005-01-03 23:35:10 +00001401#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001402#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001403
Paul Brookc527ee82010-03-01 03:31:14 +00001404#if defined(CONFIG_USER_ONLY)
/* user-mode stub: watchpoints are not supported, nothing to remove */
void cpu_watchpoint_remove_all(CPUState *env, int mask)

{
}
1409
/* user-mode stub: watchpoints are not supported */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
1415#else
pbrook6658ffb2007-03-16 23:58:11 +00001416/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    /* len must be a power of two so the mask covers it exactly */
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    /* force the slow path so accesses to this page are checked */
    tlb_flush_page(env, addr);

    /* hand the new watchpoint back if the caller asked for it */
    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
1447
aliguoria1d1bb32008-11-18 20:07:32 +00001448/* Remove a specific watchpoint. */
/* Remove a specific watchpoint, matched by address, length and flags
   (the transient BP_WATCHPOINT_HIT flag is ignored for the match).
   Returns 0 on success, -ENOENT when no such watchpoint exists. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}
1464
aliguoria1d1bb32008-11-18 20:07:32 +00001465/* Remove a specific watchpoint by reference. */
/* Remove a specific watchpoint by reference: unlink it, flush the TLB
   entry for its page, and free it. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}
1474
aliguoria1d1bb32008-11-18 20:07:32 +00001475/* Remove all matching watchpoints. */
1476void cpu_watchpoint_remove_all(CPUState *env, int mask)
1477{
aliguoric0ce9982008-11-25 22:13:57 +00001478 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001479
Blue Swirl72cf2d42009-09-12 07:36:22 +00001480 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001481 if (wp->flags & mask)
1482 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001483 }
aliguoria1d1bb32008-11-18 20:07:32 +00001484}
Paul Brookc527ee82010-03-01 03:31:14 +00001485#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001486
/* Add a breakpoint.  On success optionally returns the new entry through
   *breakpoint and returns 0; returns -ENOSYS when the target has no
   in-circuit-emulation (debug) support. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    /* discard any cached translation of the target page so the
       breakpoint takes effect */
    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
1514
/* Remove a specific breakpoint.  Matches on exact pc and flags.
   Returns 0 on success, -ENOENT if no such breakpoint exists,
   -ENOSYS without TARGET_HAS_ICE. */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
1532
/* Remove a specific breakpoint by reference.  Unlinks the entry,
   invalidates the translated code for its pc, and frees it.
   No-op without TARGET_HAS_ICE. */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}
1544
1545/* Remove all matching breakpoints. */
1546void cpu_breakpoint_remove_all(CPUState *env, int mask)
1547{
1548#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001549 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001550
Blue Swirl72cf2d42009-09-12 07:36:22 +00001551 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001552 if (bp->flags & mask)
1553 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001554 }
bellard4c3a88a2003-07-26 12:06:08 +00001555#endif
1556}
1557
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            /* under KVM the kernel tracks guest debug state for us */
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
1575
/* enable or disable low levels log */
/* Sets the global log mask; opens the log file on first enable (append
   mode if log_append is already set) and closes it when the mask drops
   back to zero. */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* subsequent re-opens of the same file must append, not truncate */
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
1603
/* Switch logging to a new file name, re-opening the log if it is
   currently active.
   NOTE(review): the previous logfilename string is never freed here --
   presumably it may initially point to a static default, so a blind
   free() would be unsafe; confirm before treating this as a leak fix. */
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    /* re-open under the new name if logging is enabled */
    cpu_set_log(loglevel);
}
bellardc33a3462003-07-29 20:50:33 +00001613
/* Break the jump chain of the TB the CPU is currently executing so the
   execution loop regains control as soon as possible. */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls. */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    /* serialize concurrent unlink attempts on the same TB graph */
    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
1633
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
/* Default (TCG) interrupt delivery: records the pending interrupt bits
   and makes sure the target vCPU notices them promptly. */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        /* force the icount counter to expire so the loop exits soon */
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

/* indirection so accelerators (e.g. KVM/Xen) can override delivery */
CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

/* User-mode emulation: no accelerator indirection needed. */
void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */
1673
bellardb54ad042004-05-20 13:42:52 +00001674void cpu_reset_interrupt(CPUState *env, int mask)
1675{
1676 env->interrupt_request &= ~mask;
1677}
1678
/* Request that the CPU leave its execution loop: set the exit flag,
   then break any chained TBs so the flag is observed promptly. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
1684
/* Table mapping "-d" log mask names to their bits, with user-visible
   help text.  Terminated by a zero-mask sentinel entry; consumed by
   cpu_str_to_log_mask() below. */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1716
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001717#ifndef CONFIG_USER_ONLY
1718static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1719 = QLIST_HEAD_INITIALIZER(memory_client_list);
1720
/* Notify every registered physical-memory client that the mapping of
   [start_addr, start_addr + size) changed to phys_offset; log_dirty
   asks clients to enable dirty logging for the range. */
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset,
                                  bool log_dirty)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset, log_dirty);
    }
}
1731
1732static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001733 target_phys_addr_t end)
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001734{
1735 CPUPhysMemoryClient *client;
1736 QLIST_FOREACH(client, &memory_client_list, list) {
1737 int r = client->sync_dirty_bitmap(client, start, end);
1738 if (r < 0)
1739 return r;
1740 }
1741 return 0;
1742}
1743
1744static int cpu_notify_migration_log(int enable)
1745{
1746 CPUPhysMemoryClient *client;
1747 QLIST_FOREACH(client, &memory_client_list, list) {
1748 int r = client->migration_log(client, enable);
1749 if (r < 0)
1750 return r;
1751 }
1752 return 0;
1753}
1754
/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
 * address.  Each intermediate table provides the next L2_BITs of guest
 * physical address space.  The number of levels vary based on host and
 * guest configuration, making it efficient to build the final guest
 * physical address by seeding the L1 offset and shifting and adding in
 * each L2 offset as we recurse through them. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp, target_phys_addr_t addr)
{
    int i;

    if (*lp == NULL) {
        /* unpopulated subtree: nothing mapped below here */
        return;
    }
    if (level == 0) {
        /* leaf level: *lp is an array of PhysPageDesc entries */
        PhysPageDesc *pd = *lp;
        /* addr currently holds the accumulated upper bits; shift them
           into their final position above the page offset */
        addr <<= L2_BITS + TARGET_PAGE_BITS;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                client->set_memory(client, addr | i << TARGET_PAGE_BITS,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset, false);
            }
        }
    } else {
        /* interior level: *lp is an array of child table pointers */
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i,
                                 (addr << L2_BITS) | i);
        }
    }
}
1786
/* Replay the entire current physical memory map to a client by walking
   every L1 entry of the physical page radix tree. */
static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i, i);
    }
}
1795
/* Register a new physical-memory client and immediately replay the
   existing memory map to it so it starts with a complete view. */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1801
/* Unregister a previously registered physical-memory client.  The
   caller retains ownership of the client structure. */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
1806#endif
1807
/* Return nonzero iff the n-byte token s1[0..n) equals the whole
   NUL-terminated string s2 (same length and same bytes). */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
ths3b46e622007-09-17 08:09:54 +00001814
bellardf193c792004-03-21 17:06:25 +00001815/* takes a comma separated list of log masks. Return 0 if error. */
1816int cpu_str_to_log_mask(const char *str)
1817{
blueswir1c7cd6a32008-10-02 18:27:46 +00001818 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001819 int mask;
1820 const char *p, *p1;
1821
1822 p = str;
1823 mask = 0;
1824 for(;;) {
1825 p1 = strchr(p, ',');
1826 if (!p1)
1827 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001828 if(cmp1(p,p1-p,"all")) {
1829 for(item = cpu_log_items; item->mask != 0; item++) {
1830 mask |= item->mask;
1831 }
1832 } else {
1833 for(item = cpu_log_items; item->mask != 0; item++) {
1834 if (cmp1(p, p1 - p, item->name))
1835 goto found;
1836 }
1837 return 0;
bellardf193c792004-03-21 17:06:25 +00001838 }
bellardf193c792004-03-21 17:06:25 +00001839 found:
1840 mask |= item->mask;
1841 if (*p1 != ',')
1842 break;
1843 p = p1 + 1;
1844 }
1845 return mask;
1846}
bellardea041c02003-06-25 16:16:50 +00001847
/* Report a fatal emulation error: print the formatted message and the
   CPU state to stderr (and to the log file when logging is enabled),
   then abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* second copy of the arguments for the qemu_log path below */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* restore the default SIGABRT handler so abort() really
           terminates even if the guest installed its own handler */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
1887
thsc5be9f02007-02-28 20:20:53 +00001888CPUState *cpu_copy(CPUState *env)
1889{
ths01ba9812007-12-09 02:22:57 +00001890 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001891 CPUState *next_cpu = new_env->next_cpu;
1892 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001893#if defined(TARGET_HAS_ICE)
1894 CPUBreakpoint *bp;
1895 CPUWatchpoint *wp;
1896#endif
1897
thsc5be9f02007-02-28 20:20:53 +00001898 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001899
1900 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001901 new_env->next_cpu = next_cpu;
1902 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001903
1904 /* Clone all break/watchpoints.
1905 Note: Once we support ptrace with hw-debug register access, make sure
1906 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001907 QTAILQ_INIT(&env->breakpoints);
1908 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001909#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001910 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001911 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1912 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001913 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001914 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1915 wp->flags, NULL);
1916 }
1917#endif
1918
thsc5be9f02007-02-28 20:20:53 +00001919 return new_env;
1920}
1921
bellard01243112004-01-04 15:48:17 +00001922#if !defined(CONFIG_USER_ONLY)
1923
/* Flush TB-lookup cache entries that may refer to the page at ADDR.
   The preceding page is flushed too because a TB can straddle a page
   boundary and still overlap the flushed page. */
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
1938
/* Template for an invalid TLB entry: all-ones addresses can never match
   a page-aligned lookup, so assigning this clears a slot. */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read = -1,
    .addr_write = -1,
    .addr_code = -1,
    .addend = -1,
};
1945
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
/* Invalidate the whole TLB and the TB-lookup cache for this CPU. */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* invalidate every slot in every MMU mode */
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    /* the TB lookup cache is keyed by virtual address, so clear it too */
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    /* forget any recorded large-page flush range */
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
1972
bellard274da6b2004-05-20 21:56:27 +00001973static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00001974{
ths5fafdf22007-09-16 21:08:06 +00001975 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00001976 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001977 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00001978 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00001979 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00001980 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04001981 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00001982 }
bellard61382a52003-10-27 21:22:23 +00001983}
1984
/* Invalidate all TLB entries for the page containing ADDR, falling back
   to a full flush if the address lies inside a recorded large page. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages. */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* flush the matching slot in every MMU mode */
    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
2014
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* clearing CODE_DIRTY_FLAG makes subsequent writes to this page
       take the slow path, so self-modifying code is noticed */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
2023
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* env/vaddr are unused here; only the dirty flag needs updating */
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
2031
ths5fafdf22007-09-16 21:08:06 +00002032static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00002033 unsigned long start, unsigned long length)
2034{
2035 unsigned long addr;
bellard84b7b8e2005-11-28 21:19:04 +00002036 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2037 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00002038 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00002039 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00002040 }
2041 }
2042}
2043
/* Note: start and end must be within the same ram block. */
/* Clear the given dirty flags for the range [start, end) and adjust
   every CPU's TLB so writes into the range will set them again. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below. */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }

    /* walk every TLB slot of every MMU mode of every CPU */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
2079
aliguori74576192008-10-06 14:02:03 +00002080int cpu_physical_memory_set_dirty_tracking(int enable)
2081{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002082 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002083 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002084 ret = cpu_notify_migration_log(!!enable);
2085 return ret;
aliguori74576192008-10-06 14:02:03 +00002086}
2087
/* Return nonzero if migration dirty tracking is currently enabled. */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
2092
Anthony Liguoric227f092009-10-01 16:12:16 -05002093int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2094 target_phys_addr_t end_addr)
aliguori2bec46d2008-11-24 20:21:41 +00002095{
Michael S. Tsirkin7b8f3b72010-01-27 22:07:21 +02002096 int ret;
Jan Kiszka151f7742009-05-01 20:52:47 +02002097
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002098 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
Jan Kiszka151f7742009-05-01 20:52:47 +02002099 return ret;
aliguori2bec46d2008-11-24 20:21:41 +00002100}
2101
Anthony PERARDe5896b12011-02-07 12:19:23 +01002102int cpu_physical_log_start(target_phys_addr_t start_addr,
2103 ram_addr_t size)
2104{
2105 CPUPhysMemoryClient *client;
2106 QLIST_FOREACH(client, &memory_client_list, list) {
2107 if (client->log_start) {
2108 int r = client->log_start(client, start_addr, size);
2109 if (r < 0) {
2110 return r;
2111 }
2112 }
2113 }
2114 return 0;
2115}
2116
2117int cpu_physical_log_stop(target_phys_addr_t start_addr,
2118 ram_addr_t size)
2119{
2120 CPUPhysMemoryClient *client;
2121 QLIST_FOREACH(client, &memory_client_list, list) {
2122 if (client->log_stop) {
2123 int r = client->log_stop(client, start_addr, size);
2124 if (r < 0) {
2125 return r;
2126 }
2127 }
2128 }
2129 return 0;
2130}
2131
/* Re-check a writable RAM TLB entry against the dirty bitmap and set
   TLB_NOTDIRTY when its page is currently clean, so the next write is
   trapped and marks the page dirty again. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* recover the host pointer, then map it back to a ram_addr_t */
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
2146
2147/* update the TLB according to the current state of the dirty bits */
2148void cpu_tlb_update_dirty(CPUState *env)
2149{
2150 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002151 int mmu_idx;
2152 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2153 for(i = 0; i < CPU_TLB_SIZE; i++)
2154 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2155 }
bellard3a7d9292005-08-21 09:26:42 +00002156}
2157
/* If this TLB entry maps vaddr with a pending TLB_NOTDIRTY trap, clear
   the flag so further writes to the page go straight through. */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}
2163
/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    /* clear TLB_NOTDIRTY in the matching slot of every MMU mode */
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
2176
Paul Brookd4c430a2010-03-17 02:14:28 +00002177/* Our TLB does not support large pages, so remember the area covered by
2178 large pages and trigger a full TLB flush if these are invalidated. */
2179static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2180 target_ulong size)
2181{
2182 target_ulong mask = ~(size - 1);
2183
2184 if (env->tlb_flush_addr == (target_ulong)-1) {
2185 env->tlb_flush_addr = vaddr & mask;
2186 env->tlb_flush_mask = mask;
2187 return;
2188 }
2189 /* Extend the existing region to include the new page.
2190 This is a compromise between unnecessary flushes and the cost
2191 of maintaining a full variable size TLB. */
2192 mask &= env->tlb_flush_mask;
2193 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2194 mask <<= 1;
2195 }
2196 env->tlb_flush_addr &= mask;
2197 env->tlb_flush_mask = mask;
2198}
2199
2200/* Add a new TLB entry. At most one entry for a given virtual address
2201 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2202 supplied size is only used by tlb_flush_page. */
2203void tlb_set_page(CPUState *env, target_ulong vaddr,
2204 target_phys_addr_t paddr, int prot,
2205 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002206{
bellard92e873b2004-05-21 14:52:29 +00002207 PhysPageDesc *p;
bellard4f2ac232004-04-26 19:44:02 +00002208 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00002209 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002210 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002211 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002212 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002213 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002214 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002215 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002216
Paul Brookd4c430a2010-03-17 02:14:28 +00002217 assert(size >= TARGET_PAGE_SIZE);
2218 if (size != TARGET_PAGE_SIZE) {
2219 tlb_add_large_page(env, vaddr, size);
2220 }
bellard92e873b2004-05-21 14:52:29 +00002221 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
bellard9fa3e852004-01-04 18:06:42 +00002222 if (!p) {
2223 pd = IO_MEM_UNASSIGNED;
bellard9fa3e852004-01-04 18:06:42 +00002224 } else {
2225 pd = p->phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00002226 }
2227#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002228 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2229 " prot=%x idx=%d pd=0x%08lx\n",
2230 vaddr, paddr, prot, mmu_idx, pd);
bellard9fa3e852004-01-04 18:06:42 +00002231#endif
2232
pbrook0f459d12008-06-09 00:20:13 +00002233 address = vaddr;
2234 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2235 /* IO memory case (romd handled later) */
2236 address |= TLB_MMIO;
2237 }
pbrook5579c7f2009-04-11 14:47:08 +00002238 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
pbrook0f459d12008-06-09 00:20:13 +00002239 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2240 /* Normal RAM. */
2241 iotlb = pd & TARGET_PAGE_MASK;
2242 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2243 iotlb |= IO_MEM_NOTDIRTY;
2244 else
2245 iotlb |= IO_MEM_ROM;
2246 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002247 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002248 It would be nice to pass an offset from the base address
2249 of that region. This would avoid having to special case RAM,
2250 and avoid full address decoding in every device.
2251 We can't use the high bits of pd for this because
2252 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002253 iotlb = (pd & ~TARGET_PAGE_MASK);
2254 if (p) {
pbrook8da3ff12008-12-01 18:59:50 +00002255 iotlb += p->region_offset;
2256 } else {
2257 iotlb += paddr;
2258 }
pbrook0f459d12008-06-09 00:20:13 +00002259 }
pbrook6658ffb2007-03-16 23:58:11 +00002260
pbrook0f459d12008-06-09 00:20:13 +00002261 code_address = address;
2262 /* Make accesses to pages with watchpoints go via the
2263 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002264 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002265 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002266 /* Avoid trapping reads of pages with a write breakpoint. */
2267 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2268 iotlb = io_mem_watch + paddr;
2269 address |= TLB_MMIO;
2270 break;
2271 }
pbrook6658ffb2007-03-16 23:58:11 +00002272 }
pbrook0f459d12008-06-09 00:20:13 +00002273 }
balrogd79acba2007-06-26 20:01:13 +00002274
pbrook0f459d12008-06-09 00:20:13 +00002275 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2276 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2277 te = &env->tlb_table[mmu_idx][index];
2278 te->addend = addend - vaddr;
2279 if (prot & PAGE_READ) {
2280 te->addr_read = address;
2281 } else {
2282 te->addr_read = -1;
2283 }
edgar_igl5c751e92008-05-06 08:44:21 +00002284
pbrook0f459d12008-06-09 00:20:13 +00002285 if (prot & PAGE_EXEC) {
2286 te->addr_code = code_address;
2287 } else {
2288 te->addr_code = -1;
2289 }
2290 if (prot & PAGE_WRITE) {
2291 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2292 (pd & IO_MEM_ROMD)) {
2293 /* Write access calls the I/O callback. */
2294 te->addr_write = address | TLB_MMIO;
2295 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2296 !cpu_physical_memory_is_dirty(pd)) {
2297 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002298 } else {
pbrook0f459d12008-06-09 00:20:13 +00002299 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002300 }
pbrook0f459d12008-06-09 00:20:13 +00002301 } else {
2302 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002303 }
bellard9fa3e852004-01-04 18:06:42 +00002304}
2305
bellard01243112004-01-04 15:48:17 +00002306#else
2307
bellardee8b7022004-02-03 23:35:10 +00002308void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002309{
2310}
2311
bellard2e126692004-04-25 21:28:44 +00002312void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002313{
2314}
2315
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002316/*
2317 * Walks guest process memory "regions" one by one
2318 * and calls callback function 'fn' for each region.
2319 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002320
2321struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002322{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002323 walk_memory_regions_fn fn;
2324 void *priv;
2325 unsigned long start;
2326 int prot;
2327};
bellard9fa3e852004-01-04 18:06:42 +00002328
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002329static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002330 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002331{
2332 if (data->start != -1ul) {
2333 int rc = data->fn(data->priv, data->start, end, data->prot);
2334 if (rc != 0) {
2335 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002336 }
bellard33417e72003-08-10 21:47:01 +00002337 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002338
2339 data->start = (new_prot ? end : -1ul);
2340 data->prot = new_prot;
2341
2342 return 0;
2343}
2344
2345static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002346 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002347{
Paul Brookb480d9b2010-03-12 23:23:29 +00002348 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002349 int i, rc;
2350
2351 if (*lp == NULL) {
2352 return walk_memory_regions_end(data, base, 0);
2353 }
2354
2355 if (level == 0) {
2356 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002357 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002358 int prot = pd[i].flags;
2359
2360 pa = base | (i << TARGET_PAGE_BITS);
2361 if (prot != data->prot) {
2362 rc = walk_memory_regions_end(data, pa, prot);
2363 if (rc != 0) {
2364 return rc;
2365 }
2366 }
2367 }
2368 } else {
2369 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002370 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002371 pa = base | ((abi_ulong)i <<
2372 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002373 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2374 if (rc != 0) {
2375 return rc;
2376 }
2377 }
2378 }
2379
2380 return 0;
2381}
2382
2383int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2384{
2385 struct walk_memory_regions_data data;
2386 unsigned long i;
2387
2388 data.fn = fn;
2389 data.priv = priv;
2390 data.start = -1ul;
2391 data.prot = 0;
2392
2393 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002394 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002395 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2396 if (rc != 0) {
2397 return rc;
2398 }
2399 }
2400
2401 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002402}
2403
Paul Brookb480d9b2010-03-12 23:23:29 +00002404static int dump_region(void *priv, abi_ulong start,
2405 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002406{
2407 FILE *f = (FILE *)priv;
2408
Paul Brookb480d9b2010-03-12 23:23:29 +00002409 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2410 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002411 start, end, end - start,
2412 ((prot & PAGE_READ) ? 'r' : '-'),
2413 ((prot & PAGE_WRITE) ? 'w' : '-'),
2414 ((prot & PAGE_EXEC) ? 'x' : '-'));
2415
2416 return (0);
2417}
2418
2419/* dump memory mappings */
2420void page_dump(FILE *f)
2421{
2422 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2423 "start", "end", "size", "prot");
2424 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002425}
2426
pbrook53a59602006-03-25 19:31:22 +00002427int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002428{
bellard9fa3e852004-01-04 18:06:42 +00002429 PageDesc *p;
2430
2431 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002432 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002433 return 0;
2434 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002435}
2436
Richard Henderson376a7902010-03-10 15:57:04 -08002437/* Modify the flags of a page and invalidate the code if necessary.
2438 The flag PAGE_WRITE_ORG is positioned automatically depending
2439 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002440void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002441{
Richard Henderson376a7902010-03-10 15:57:04 -08002442 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002443
Richard Henderson376a7902010-03-10 15:57:04 -08002444 /* This function should never be called with addresses outside the
2445 guest address space. If this assert fires, it probably indicates
2446 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002447#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2448 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002449#endif
2450 assert(start < end);
2451
bellard9fa3e852004-01-04 18:06:42 +00002452 start = start & TARGET_PAGE_MASK;
2453 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002454
2455 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002456 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002457 }
2458
2459 for (addr = start, len = end - start;
2460 len != 0;
2461 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2462 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2463
2464 /* If the write protection bit is set, then we invalidate
2465 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002466 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002467 (flags & PAGE_WRITE) &&
2468 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002469 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002470 }
2471 p->flags = flags;
2472 }
bellard9fa3e852004-01-04 18:06:42 +00002473}
2474
ths3d97b402007-11-02 19:02:07 +00002475int page_check_range(target_ulong start, target_ulong len, int flags)
2476{
2477 PageDesc *p;
2478 target_ulong end;
2479 target_ulong addr;
2480
Richard Henderson376a7902010-03-10 15:57:04 -08002481 /* This function should never be called with addresses outside the
2482 guest address space. If this assert fires, it probably indicates
2483 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002484#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2485 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002486#endif
2487
Richard Henderson3e0650a2010-03-29 10:54:42 -07002488 if (len == 0) {
2489 return 0;
2490 }
Richard Henderson376a7902010-03-10 15:57:04 -08002491 if (start + len - 1 < start) {
2492 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002493 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002494 }
balrog55f280c2008-10-28 10:24:11 +00002495
ths3d97b402007-11-02 19:02:07 +00002496 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2497 start = start & TARGET_PAGE_MASK;
2498
Richard Henderson376a7902010-03-10 15:57:04 -08002499 for (addr = start, len = end - start;
2500 len != 0;
2501 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002502 p = page_find(addr >> TARGET_PAGE_BITS);
2503 if( !p )
2504 return -1;
2505 if( !(p->flags & PAGE_VALID) )
2506 return -1;
2507
bellarddae32702007-11-14 10:51:00 +00002508 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002509 return -1;
bellarddae32702007-11-14 10:51:00 +00002510 if (flags & PAGE_WRITE) {
2511 if (!(p->flags & PAGE_WRITE_ORG))
2512 return -1;
2513 /* unprotect the page if it was put read-only because it
2514 contains translated code */
2515 if (!(p->flags & PAGE_WRITE)) {
2516 if (!page_unprotect(addr, 0, NULL))
2517 return -1;
2518 }
2519 return 0;
2520 }
ths3d97b402007-11-02 19:02:07 +00002521 }
2522 return 0;
2523}
2524
bellard9fa3e852004-01-04 18:06:42 +00002525/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002526 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002527int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002528{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002529 unsigned int prot;
2530 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002531 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002532
pbrookc8a706f2008-06-02 16:16:42 +00002533 /* Technically this isn't safe inside a signal handler. However we
2534 know this only ever happens in a synchronous SEGV handler, so in
2535 practice it seems to be ok. */
2536 mmap_lock();
2537
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002538 p = page_find(address >> TARGET_PAGE_BITS);
2539 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002540 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002541 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002542 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002543
bellard9fa3e852004-01-04 18:06:42 +00002544 /* if the page was really writable, then we change its
2545 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002546 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2547 host_start = address & qemu_host_page_mask;
2548 host_end = host_start + qemu_host_page_size;
2549
2550 prot = 0;
2551 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2552 p = page_find(addr >> TARGET_PAGE_BITS);
2553 p->flags |= PAGE_WRITE;
2554 prot |= p->flags;
2555
bellard9fa3e852004-01-04 18:06:42 +00002556 /* and since the content will be modified, we must invalidate
2557 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002558 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002559#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002560 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002561#endif
bellard9fa3e852004-01-04 18:06:42 +00002562 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002563 mprotect((void *)g2h(host_start), qemu_host_page_size,
2564 prot & PAGE_BITS);
2565
2566 mmap_unlock();
2567 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002568 }
pbrookc8a706f2008-06-02 16:16:42 +00002569 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002570 return 0;
2571}
2572
bellard6a00d602005-11-21 23:25:50 +00002573static inline void tlb_set_dirty(CPUState *env,
2574 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002575{
2576}
bellard9fa3e852004-01-04 18:06:42 +00002577#endif /* defined(CONFIG_USER_ONLY) */
2578
pbrooke2eef172008-06-08 01:09:01 +00002579#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002580
Paul Brookc04b2b72010-03-01 03:31:14 +00002581#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2582typedef struct subpage_t {
2583 target_phys_addr_t base;
Richard Hendersonf6405242010-04-22 16:47:31 -07002584 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2585 ram_addr_t region_offset[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002586} subpage_t;
2587
Anthony Liguoric227f092009-10-01 16:12:16 -05002588static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2589 ram_addr_t memory, ram_addr_t region_offset);
Richard Hendersonf6405242010-04-22 16:47:31 -07002590static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2591 ram_addr_t orig_memory,
2592 ram_addr_t region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002593#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2594 need_subpage) \
2595 do { \
2596 if (addr > start_addr) \
2597 start_addr2 = 0; \
2598 else { \
2599 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2600 if (start_addr2 > 0) \
2601 need_subpage = 1; \
2602 } \
2603 \
blueswir149e9fba2007-05-30 17:25:06 +00002604 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002605 end_addr2 = TARGET_PAGE_SIZE - 1; \
2606 else { \
2607 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2608 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2609 need_subpage = 1; \
2610 } \
2611 } while (0)
2612
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002613/* register physical memory.
2614 For RAM, 'size' must be a multiple of the target page size.
2615 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002616 io memory page. The address used when calling the IO function is
2617 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002618 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002619 before calculating this offset. This should not be a problem unless
2620 the low bits of start_addr and region_offset differ. */
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03002621void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
Anthony Liguoric227f092009-10-01 16:12:16 -05002622 ram_addr_t size,
2623 ram_addr_t phys_offset,
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03002624 ram_addr_t region_offset,
2625 bool log_dirty)
bellard33417e72003-08-10 21:47:01 +00002626{
Anthony Liguoric227f092009-10-01 16:12:16 -05002627 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002628 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002629 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002630 ram_addr_t orig_size = size;
Richard Hendersonf6405242010-04-22 16:47:31 -07002631 subpage_t *subpage;
bellard33417e72003-08-10 21:47:01 +00002632
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002633 assert(size);
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03002634 cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002635
pbrook67c4d232009-02-23 13:16:07 +00002636 if (phys_offset == IO_MEM_UNASSIGNED) {
2637 region_offset = start_addr;
2638 }
pbrook8da3ff12008-12-01 18:59:50 +00002639 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002640 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002641 end_addr = start_addr + (target_phys_addr_t)size;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002642
2643 addr = start_addr;
2644 do {
blueswir1db7b5422007-05-26 17:36:03 +00002645 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2646 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002647 ram_addr_t orig_memory = p->phys_offset;
2648 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002649 int need_subpage = 0;
2650
2651 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2652 need_subpage);
Richard Hendersonf6405242010-04-22 16:47:31 -07002653 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002654 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2655 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002656 &p->phys_offset, orig_memory,
2657 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002658 } else {
2659 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2660 >> IO_MEM_SHIFT];
2661 }
pbrook8da3ff12008-12-01 18:59:50 +00002662 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2663 region_offset);
2664 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002665 } else {
2666 p->phys_offset = phys_offset;
2667 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2668 (phys_offset & IO_MEM_ROMD))
2669 phys_offset += TARGET_PAGE_SIZE;
2670 }
2671 } else {
2672 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2673 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002674 p->region_offset = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00002675 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
pbrook8da3ff12008-12-01 18:59:50 +00002676 (phys_offset & IO_MEM_ROMD)) {
blueswir1db7b5422007-05-26 17:36:03 +00002677 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002678 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002679 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002680 int need_subpage = 0;
2681
2682 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2683 end_addr2, need_subpage);
2684
Richard Hendersonf6405242010-04-22 16:47:31 -07002685 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002686 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002687 &p->phys_offset, IO_MEM_UNASSIGNED,
pbrook67c4d232009-02-23 13:16:07 +00002688 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002689 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002690 phys_offset, region_offset);
2691 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002692 }
2693 }
2694 }
pbrook8da3ff12008-12-01 18:59:50 +00002695 region_offset += TARGET_PAGE_SIZE;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002696 addr += TARGET_PAGE_SIZE;
2697 } while (addr != end_addr);
ths3b46e622007-09-17 08:09:54 +00002698
bellard9d420372006-06-25 22:25:22 +00002699 /* since each CPU stores ram addresses in its TLB cache, we must
2700 reset the modified entries */
2701 /* XXX: slow ! */
2702 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2703 tlb_flush(env, 1);
2704 }
bellard33417e72003-08-10 21:47:01 +00002705}
2706
bellardba863452006-09-24 18:41:10 +00002707/* XXX: temporary until new memory mapping API */
Anthony Liguoric227f092009-10-01 16:12:16 -05002708ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
bellardba863452006-09-24 18:41:10 +00002709{
2710 PhysPageDesc *p;
2711
2712 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2713 if (!p)
2714 return IO_MEM_UNASSIGNED;
2715 return p->phys_offset;
2716}
2717
Anthony Liguoric227f092009-10-01 16:12:16 -05002718void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002719{
2720 if (kvm_enabled())
2721 kvm_coalesce_mmio_region(addr, size);
2722}
2723
Anthony Liguoric227f092009-10-01 16:12:16 -05002724void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002725{
2726 if (kvm_enabled())
2727 kvm_uncoalesce_mmio_region(addr, size);
2728}
2729
/* Drain KVM's buffered coalesced-MMIO writes, if KVM is in use. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
2735
Marcelo Tosattic9027602010-03-01 20:25:08 -03002736#if defined(__linux__) && !defined(TARGET_S390X)
2737
2738#include <sys/vfs.h>
2739
2740#define HUGETLBFS_MAGIC 0x958458f6
2741
2742static long gethugepagesize(const char *path)
2743{
2744 struct statfs fs;
2745 int ret;
2746
2747 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002748 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002749 } while (ret != 0 && errno == EINTR);
2750
2751 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002752 perror(path);
2753 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002754 }
2755
2756 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002757 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002758
2759 return fs.f_bsize;
2760}
2761
Alex Williamson04b16652010-07-02 11:13:17 -06002762static void *file_ram_alloc(RAMBlock *block,
2763 ram_addr_t memory,
2764 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002765{
2766 char *filename;
2767 void *area;
2768 int fd;
2769#ifdef MAP_POPULATE
2770 int flags;
2771#endif
2772 unsigned long hpagesize;
2773
2774 hpagesize = gethugepagesize(path);
2775 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002776 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002777 }
2778
2779 if (memory < hpagesize) {
2780 return NULL;
2781 }
2782
2783 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2784 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2785 return NULL;
2786 }
2787
2788 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002789 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002790 }
2791
2792 fd = mkstemp(filename);
2793 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002794 perror("unable to create backing store for hugepages");
2795 free(filename);
2796 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002797 }
2798 unlink(filename);
2799 free(filename);
2800
2801 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2802
2803 /*
2804 * ftruncate is not supported by hugetlbfs in older
2805 * hosts, so don't bother bailing out on errors.
2806 * If anything goes wrong with it under other filesystems,
2807 * mmap will fail.
2808 */
2809 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002810 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002811
2812#ifdef MAP_POPULATE
2813 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2814 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2815 * to sidestep this quirk.
2816 */
2817 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2818 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2819#else
2820 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2821#endif
2822 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002823 perror("file_ram_alloc: can't mmap RAM pages");
2824 close(fd);
2825 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002826 }
Alex Williamson04b16652010-07-02 11:13:17 -06002827 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002828 return area;
2829}
2830#endif
2831
Alex Williamsond17b5282010-06-25 11:08:38 -06002832static ram_addr_t find_ram_offset(ram_addr_t size)
2833{
Alex Williamson04b16652010-07-02 11:13:17 -06002834 RAMBlock *block, *next_block;
Blue Swirl09d7ae92010-07-07 19:37:53 +00002835 ram_addr_t offset = 0, mingap = ULONG_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002836
2837 if (QLIST_EMPTY(&ram_list.blocks))
2838 return 0;
2839
2840 QLIST_FOREACH(block, &ram_list.blocks, next) {
2841 ram_addr_t end, next = ULONG_MAX;
2842
2843 end = block->offset + block->length;
2844
2845 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2846 if (next_block->offset >= end) {
2847 next = MIN(next, next_block->offset);
2848 }
2849 }
2850 if (next - end >= size && next - end < mingap) {
2851 offset = end;
2852 mingap = next - end;
2853 }
2854 }
2855 return offset;
2856}
2857
2858static ram_addr_t last_ram_offset(void)
2859{
Alex Williamsond17b5282010-06-25 11:08:38 -06002860 RAMBlock *block;
2861 ram_addr_t last = 0;
2862
2863 QLIST_FOREACH(block, &ram_list.blocks, next)
2864 last = MAX(last, block->offset + block->length);
2865
2866 return last;
2867}
2868
/* Allocate and register a new RAM block of @size bytes.
 *
 * The block's idstr is "<qdev-bus-path>/<name>" when @dev has a bus path,
 * otherwise just @name; registering a duplicate idstr aborts.  If @host is
 * non-NULL the caller supplies the backing memory and the block is flagged
 * RAM_PREALLOC_MASK so qemu_ram_free() will not release it.  Otherwise the
 * memory comes from a -mem-path file mapping (Linux, non-s390x), a fixed
 * low mmap (s390x KVM), the Xen map cache, or qemu_vmalloc().
 *
 * Returns the guest ram_addr_t offset of the new block. */
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    /* Prefix with the device's bus path, when available, so two devices
       using the same generic name still get distinct idstrs. */
    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* A duplicate idstr would be ambiguous (e.g. in migration streams);
       refuse it outright. */
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    new_block->offset = find_ram_offset(size);
    if (host) {
        /* Caller-provided memory: remember not to free/unmap it later. */
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                /* File-backed allocation failed: fall back to anonymous
                   memory. */
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               an system defined value, which is at least 256GB. Larger systems
               have larger values. We put the guest between the end of data
               segment (system break) and this value. We use 32GB as a base to
               have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_mapcache_enabled()) {
                /* Xen tracks the block by offset only; host pointers are
                   established lazily via the map cache. */
                xen_ram_alloc(new_block->offset, size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            /* NOTE(review): on the Xen path new_block->host may still be
               NULL here, making this madvise a silent no-op — confirm. */
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    /* Grow the dirty bitmap to cover the new top of RAM and mark the whole
       new range dirty so it is written out / migrated on first use. */
    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
2948
/* Allocate a new QEMU-backed RAM block; convenience wrapper around
 * qemu_ram_alloc_from_ptr() with host == NULL. */
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
bellarde9a1ab12007-02-08 23:08:38 +00002953
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002954void qemu_ram_free_from_ptr(ram_addr_t addr)
2955{
2956 RAMBlock *block;
2957
2958 QLIST_FOREACH(block, &ram_list.blocks, next) {
2959 if (addr == block->offset) {
2960 QLIST_REMOVE(block, next);
2961 qemu_free(block);
2962 return;
2963 }
2964 }
2965}
2966
/* Unregister the RAMBlock at offset @addr and release its backing storage.
 * The release path mirrors the allocation path in qemu_ram_alloc_from_ptr():
 *  - RAM_PREALLOC_MASK: caller-owned memory, nothing to release;
 *  - -mem-path blocks:  munmap + close(fd), or qemu_vfree for the
 *    anonymous fallback that has no fd;
 *  - s390x/KVM:         plain munmap of the fixed mapping;
 *  - Xen map cache:     invalidate the cached mapping;
 *  - otherwise:         qemu_vfree.
 * Unknown offsets are silently ignored. */
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ; /* host memory is owned by the caller */
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    /* file_ram_alloc() failed and we fell back to vmalloc */
                    qemu_vfree(block->host);
                }
#else
                /* -mem-path is rejected at allocation time on this host,
                   so this branch is unreachable */
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_mapcache_enabled()) {
                    qemu_invalidate_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            qemu_free(block);
            return;
        }
    }

}
3004
#ifndef _WIN32
/* Replace the mapping of guest RAM range [addr, addr+length): munmap the
 * virtual range and mmap fresh pages at the same address (MAP_FIXED),
 * reproducing the flags the original allocation used (file-backed
 * -mem-path, anonymous, or the s390x/KVM shared mapping).  Used e.g. to
 * recover a range after a hardware memory error.  RAM_PREALLOC_MASK
 * blocks are caller-owned and left untouched.  Exits on mmap failure
 * because the guest cannot continue with a hole in its RAM. */
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ; /* caller-owned memory cannot be remapped here */
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        /* match the prealloc behaviour of the original
                           file mapping */
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        /* anonymous fallback allocation: remap anonymously */
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    /* -mem-path is rejected at allocation time here */
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    /* NOTE(review): "%lx" assumes ram_addr_t is long-sized;
                       verify against the ram_addr_t typedef. */
                    fprintf(stderr, "Could not remap addr: %lx@%lx\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
3064
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list (MRU ordering) so
               repeated lookups into the same block hit immediately. */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_mapcache_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 */
                if (block->offset == 0) {
                    /* block 0 is accessed through the locked map cache */
                    return qemu_map_cache(addr, 0, 1);
                } else if (block->host == NULL) {
                    /* other blocks are mapped lazily, in one piece */
                    block->host = xen_map_block(block->offset, block->length);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
3103
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks
 * (safe to call while another thread may be iterating the list).
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_mapcache_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 */
                if (block->offset == 0) {
                    /* block 0 is accessed through the locked map cache */
                    return qemu_map_cache(addr, 0, 1);
                } else if (block->host == NULL) {
                    /* other blocks are mapped lazily, in one piece */
                    block->host = xen_map_block(block->offset, block->length);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
3132
/* Release a pointer previously returned by qemu_get_ram_ptr() or
 * qemu_safe_ram_ptr().  Only Xen needs this: a pointer that is the start
 * of a lazily mapped block is unmapped, anything else is assumed to come
 * from the map cache and is unlocked.  Without Xen this is a no-op
 * (besides the trace point). */
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);

    if (xen_mapcache_enabled()) {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr == block->host) {
                break;
            }
        }
        /* block is NULL if the loop ran off the end without a match */
        if (block && block->host) {
            xen_unmap_block(block->host, block->length);
            block->host = NULL;
        } else {
            /* not a block start: must be a map-cache pointer */
            qemu_map_cache_unlock(addr);
        }
    }
}
3153
/* Translate a host pointer back into a guest ram_addr_t.
 * On success stores the offset in *ram_addr and returns 0; returns -1
 * when the pointer belongs to no registered block (and, with Xen, no
 * map-cache entry). */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* Skip blocks that currently have no host mapping
           (e.g. lazily mapped Xen blocks). */
        if (block->host == NULL) {
            continue;
        }
        /* NOTE(review): host below block->host gives a negative
           difference; this test relies on the comparison's implicit
           conversion — confirm intended. */
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    if (xen_mapcache_enabled()) {
        /* the pointer may come from the map cache rather than block->host */
        *ram_addr = qemu_ram_addr_from_mapcache(ptr);
        return 0;
    }

    return -1;
}
Alex Williamsonf471a172010-06-11 11:11:42 -06003177
Marcelo Tosattie8902612010-10-11 15:31:19 -03003178/* Some of the softmmu routines need to translate from a host pointer
3179 (typically a TLB entry) back to a ram offset. */
3180ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3181{
3182 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003183
Marcelo Tosattie8902612010-10-11 15:31:19 -03003184 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3185 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3186 abort();
3187 }
3188 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003189}
3190
/* Handlers for accesses to unassigned (unmapped) physical addresses.
   Reads return 0 and writes are dropped; on targets that model bus
   faults (Alpha, SPARC, MicroBlaze) the access is first forwarded to
   do_unassigned_access() so the CPU can raise an exception. */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

/* Dispatch tables indexed by access size: 0 = byte, 1 = word, 2 = long. */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
3265
/* Write handlers installed for RAM pages whose CODE_DIRTY_FLAG is clear:
   they invalidate any translated code derived from the page, perform the
   store, update the dirty bitmap, and drop the slow-path TLB entry once
   the page is fully dirty again. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* the page may contain translated code: invalidate before writing */
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

/* 16-bit variant of notdirty_mem_writeb(). */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

/* 32-bit variant of notdirty_mem_writeb(). */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

/* Reads never reach the notdirty handlers, so the read table is empty. */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
3337
/* Generate a debug exception if a watchpoint has been hit.
 * @offset is the page offset of the access, @len_mask selects the access
 * width, @flags is BP_MEM_READ/BP_MEM_WRITE. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    /* reconstruct the full guest virtual address of the access */
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                /* locate the TB containing the faulting access so guest
                   CPU state can be synchronized back to it */
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* retranslate a single instruction so execution stops
                       immediately after the watched access */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
3382
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  The ~0x0/~0x1/~0x3 masks encode 1/2/4-byte widths. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

/* Dispatch tables indexed by access size: 0 = byte, 1 = word, 2 = long. */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
pbrook6658ffb2007-03-16 23:58:11 +00003436
/* Dispatch a read of 2^len bytes within a subpage: look up the io handler
 * and per-entry address bias registered for this offset and forward. */
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

/* Write-side counterpart of subpage_readlen(). */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}
3465
/* Fixed-width wrappers around subpage_readlen/subpage_writelen
   (len: 0 = byte, 1 = word, 2 = long), plus their dispatch tables. */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
3510
/* Route the subpage byte range [start, end] (inclusive, relative to the
 * page) to io handler @memory, recording @region_offset as the address
 * bias applied on each access.  Returns 0 on success, -1 on bad range. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* plain RAM cannot be dispatched through a subpage; fall back to the
       unassigned handlers */
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
        memory = IO_MEM_UNASSIGNED;
    /* reduce the encoded handler value to a bare io table index */
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}
3534
/* Create a subpage dispatcher for the page at @base: register the subpage
 * read/write accessors as a new io memory region, store the IO_MEM_SUBPAGE
 * tagged handler value in *phys, and initially route the whole page to
 * @orig_memory/@region_offset.  Returns the new subpage_t. */
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
3556
aliguori88715652009-02-11 15:20:58 +00003557static int get_free_io_mem_idx(void)
3558{
3559 int i;
3560
3561 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3562 if (!io_mem_used[i]) {
3563 io_mem_used[i] = 1;
3564 return i;
3565 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003566 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003567 return -1;
3568}
3569
/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 *
 * CPU          Device           swap?
 *
 * little       little           no
 * little       big              yes
 * big          little           yes
 * big          big              no
 */

/* Stored in io_mem_opaque when byteswapping is interposed: keeps the
   device's original accessors and opaque so the swapendian_* thunks can
   forward to them. */
typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;
3588
3589static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3590{
3591 uint32_t val;
3592 SwapEndianContainer *c = opaque;
3593 val = c->read[0](c->opaque, addr);
3594 return val;
3595}
3596
3597static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3598{
3599 uint32_t val;
3600 SwapEndianContainer *c = opaque;
3601 val = bswap16(c->read[1](c->opaque, addr));
3602 return val;
3603}
3604
3605static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3606{
3607 uint32_t val;
3608 SwapEndianContainer *c = opaque;
3609 val = bswap32(c->read[2](c->opaque, addr));
3610 return val;
3611}
3612
/* Read trampolines indexed by access-size shift
 * (0 = 8-bit, 1 = 16-bit, 2 = 32-bit), matching io_mem_read layout. */
static CPUReadMemoryFunc * const swapendian_readfn[3]={
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};
3618
3619static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3620 uint32_t val)
3621{
3622 SwapEndianContainer *c = opaque;
3623 c->write[0](c->opaque, addr, val);
3624}
3625
3626static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3627 uint32_t val)
3628{
3629 SwapEndianContainer *c = opaque;
3630 c->write[1](c->opaque, addr, bswap16(val));
3631}
3632
3633static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3634 uint32_t val)
3635{
3636 SwapEndianContainer *c = opaque;
3637 c->write[2](c->opaque, addr, bswap32(val));
3638}
3639
/* Write trampolines indexed by access-size shift
 * (0 = 8-bit, 1 = 16-bit, 2 = 32-bit), matching io_mem_write layout. */
static CPUWriteMemoryFunc * const swapendian_writefn[3]={
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};
3645
3646static void swapendian_init(int io_index)
3647{
3648 SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
3649 int i;
3650
3651 /* Swap mmio for big endian targets */
3652 c->opaque = io_mem_opaque[io_index];
3653 for (i = 0; i < 3; i++) {
3654 c->read[i] = io_mem_read[io_index][i];
3655 c->write[i] = io_mem_write[io_index][i];
3656
3657 io_mem_read[io_index][i] = swapendian_readfn[i];
3658 io_mem_write[io_index][i] = swapendian_writefn[i];
3659 }
3660 io_mem_opaque[io_index] = c;
3661}
3662
/* Undo swapendian_init(): if this slot's read handlers are the
 * byte-swapping trampolines, free the SwapEndianContainer stored in
 * io_mem_opaque.  The table entries themselves are reset by the caller
 * (see cpu_unregister_io_memory, which overwrites handlers and opaque
 * right after calling this). */
static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        qemu_free(io_mem_opaque[io_index]);
    }
}
3669
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        /* Allocate a fresh slot from the io_mem tables. */
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        /* Caller passed an encoded index (as returned below); decode
           and range-check it. */
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    /* NULL entries fall back to the "unassigned" handlers. */
    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    /* Interpose byte-swapping trampolines when the device endianness
       differs from the target's compile-time endianness. */
    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}
bellard61382a52003-10-27 21:22:23 +00003722
Blue Swirld60efc62009-08-25 18:29:31 +00003723int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3724 CPUWriteMemoryFunc * const *mem_write,
Alexander Grafdd310532010-12-08 12:05:36 +01003725 void *opaque, enum device_endian endian)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003726{
Alexander Graf2507c122010-12-08 12:05:37 +01003727 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003728}
3729
aliguori88715652009-02-11 15:20:58 +00003730void cpu_unregister_io_memory(int io_table_address)
3731{
3732 int i;
3733 int io_index = io_table_address >> IO_MEM_SHIFT;
3734
Alexander Grafdd310532010-12-08 12:05:36 +01003735 swapendian_del(io_index);
3736
aliguori88715652009-02-11 15:20:58 +00003737 for (i=0;i < 3; i++) {
3738 io_mem_read[io_index][i] = unassigned_mem_read[i];
3739 io_mem_write[io_index][i] = unassigned_mem_write[i];
3740 }
3741 io_mem_opaque[io_index] = NULL;
3742 io_mem_used[io_index] = 0;
3743}
3744
/* One-time setup of the io_mem tables: register the fixed built-in
 * regions (ROM, unassigned, not-dirty) and the watchpoint handler. */
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    /* Reserve the first 5 slots so get_free_io_mem_idx() never hands
     * them out.  NOTE(review): presumably slots 0-4 correspond to the
     * fixed IO_MEM_* regions (including ones registered elsewhere,
     * e.g. subpage) -- confirm against the IO_MEM_* definitions. */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}
3765
pbrooke2eef172008-06-08 01:09:01 +00003766#endif /* !defined(CONFIG_USER_ONLY) */
3767
bellard13eb76e2004-01-24 15:23:36 +00003768/* physical memory access (slow version, mainly for debug) */
3769#if defined(CONFIG_USER_ONLY)
/* Debugger read/write of guest virtual memory (user-mode emulation).
 * Walks the range page by page, checking PAGE_VALID plus the matching
 * PAGE_READ/PAGE_WRITE permission, and copies via lock_user/unlock_user.
 * Returns 0 on success, -1 on any invalid or inaccessible page. */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            /* Third argument l: write back l modified bytes. */
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            /* Third argument 0: nothing was modified, no write-back. */
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
bellard8df1cd02005-01-28 22:37:22 +00003808
bellard13eb76e2004-01-24 15:23:36 +00003809#else
/* Read or write @len bytes of guest physical memory at @addr.
 * RAM pages are copied directly (with TB invalidation and dirty-bit
 * update on writes); MMIO pages are dispatched to the registered
 * io_mem handlers in the widest naturally-aligned units available
 * (4, 2 or 1 bytes per iteration). */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* MMIO write: rebase into the region and pick the
                   widest aligned access size. */
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
bellard8df1cd02005-01-28 22:37:22 +00003907
/* used for ROM loading : can write in RAM and ROM */
/* Unlike cpu_physical_memory_rw(), this writes to ROM/ROMD pages as
 * well as RAM, silently skips pure I/O pages, and performs no dirty
 * tracking or TB invalidation. */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3947
/* Scratch buffer used by cpu_physical_memory_map() when the requested
 * region is not directly addressable RAM.  Only one bounce mapping may
 * be live at a time: buffer != NULL marks it busy (see the
 * "done || bounce.buffer" check in cpu_physical_memory_map). */
typedef struct {
    void *buffer;            /* one page from qemu_memalign(), NULL when free */
    target_phys_addr_t addr; /* guest physical address being bounced */
    target_phys_addr_t len;  /* length of the bounced chunk */
} BounceBuffer;

static BounceBuffer bounce;
3955
/* A caller waiting for map resources (e.g. the bounce buffer) to free
 * up; registered via cpu_register_map_client() and fired by
 * cpu_notify_map_clients() when cpu_physical_memory_unmap releases the
 * bounce buffer. */
typedef struct MapClient {
    void *opaque;                   /* passed back to callback */
    void (*callback)(void *opaque); /* invoked on notify */
    QLIST_ENTRY(MapClient) link;
} MapClient;

/* Singly-anchored list of all currently registered map clients. */
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003964
3965void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3966{
3967 MapClient *client = qemu_malloc(sizeof(*client));
3968
3969 client->opaque = opaque;
3970 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003971 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003972 return client;
3973}
3974
3975void cpu_unregister_map_client(void *_client)
3976{
3977 MapClient *client = (MapClient *)_client;
3978
Blue Swirl72cf2d42009-09-12 07:36:22 +00003979 QLIST_REMOVE(client, link);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003980 qemu_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003981}
3982
3983static void cpu_notify_map_clients(void)
3984{
3985 MapClient *client;
3986
Blue Swirl72cf2d42009-09-12 07:36:22 +00003987 while (!QLIST_EMPTY(&map_client_list)) {
3988 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003989 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003990 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003991 }
3992}
3993
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;   /* bytes successfully mapped so far */
    int l;
    uint8_t *ret = NULL;           /* host pointer for the start of the mapping */
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Non-RAM: fall back to the single global bounce buffer.
             * Give up if we already mapped something, or if the bounce
             * buffer is in use by another mapping. */
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* Pre-fill the bounce buffer for read mappings. */
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            /* Host pointers are no longer contiguous: stop extending. */
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
4055
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping. */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            /* Walk the written range page by page, invalidating TBs
             * and setting dirty bits for each modified page. */
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_mapcache_enabled()) {
            /* Release each page's mapcache reference taken by
             * qemu_get_ram_ptr() during the map. */
            uint8_t *buffer1 = buffer;
            uint8_t *end_buffer = buffer + len;

            while (buffer1 < end_buffer) {
                qemu_put_ram_ptr(buffer1);
                buffer1 += TARGET_PAGE_SIZE;
            }
        }
        return;
    }
    /* Bounce-buffer case: flush writes back to guest memory, release
     * the buffer and wake any clients waiting to map. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
bellardd0ecd2a2006-04-23 17:14:48 +00004100
/* warning: addr must be aligned */
/* Load a 32-bit value from guest physical memory, dispatching to the
 * io_mem handler for MMIO pages or reading RAM/ROMD pages directly. */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
4132
/* warning: addr must be aligned */
/* Load a 64-bit value from guest physical memory.  MMIO pages are
 * accessed as two 32-bit reads, combined in target byte order. */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        /* Big-endian target: the word at addr is the high half. */
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        /* Little-endian target: the word at addr is the low half. */
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
4170
bellardaab33092005-10-30 20:48:42 +00004171/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004172uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00004173{
4174 uint8_t val;
4175 cpu_physical_memory_read(addr, &val, 1);
4176 return val;
4177}
4178
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004179/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05004180uint32_t lduw_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00004181{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004182 int io_index;
4183 uint8_t *ptr;
4184 uint64_t val;
4185 unsigned long pd;
4186 PhysPageDesc *p;
4187
4188 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4189 if (!p) {
4190 pd = IO_MEM_UNASSIGNED;
4191 } else {
4192 pd = p->phys_offset;
4193 }
4194
4195 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4196 !(pd & IO_MEM_ROMD)) {
4197 /* I/O case */
4198 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4199 if (p)
4200 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4201 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
4202 } else {
4203 /* RAM case */
4204 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4205 (addr & ~TARGET_PAGE_MASK);
4206 val = lduw_p(ptr);
4207 }
4208 return val;
bellardaab33092005-10-30 20:48:42 +00004209}
4210
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* MMIO: forward to the registered 32-bit write handler. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* During migration the dirty log must still be updated so the
         * page gets re-sent; otherwise dirty bits are left untouched. */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
4249
/* Store a 64-bit value without dirty-bit bookkeeping (see
 * stl_phys_notdirty).  MMIO pages take two 32-bit writes in target
 * byte order; addr must be aligned. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        /* Big-endian target: high half first. */
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        /* Little-endian target: low half first. */
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM case: no TB invalidation, no dirty flags. */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
4281
/* warning: addr must be aligned */
/* Store a 32-bit value to guest physical memory: MMIO pages go through
 * the io_mem handler, RAM stores invalidate TBs and set dirty bits. */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
4317
bellardaab33092005-10-30 20:48:42 +00004318/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004319void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004320{
4321 uint8_t v = val;
4322 cpu_physical_memory_write(addr, &v, 1);
4323}
4324
/* warning: addr must be aligned */
/* Store a 16-bit value to guest physical memory: MMIO pages go through
 * the io_mem handler, RAM stores invalidate TBs and set dirty bits. */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stw_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
4360
4361/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004362void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004363{
4364 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004365 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004366}
4367
aliguori5e2972f2009-03-28 17:51:36 +00004368/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00004369int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004370 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004371{
4372 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004373 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004374 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004375
4376 while (len > 0) {
4377 page = addr & TARGET_PAGE_MASK;
4378 phys_addr = cpu_get_phys_page_debug(env, page);
4379 /* if no physical page mapped, return an error */
4380 if (phys_addr == -1)
4381 return -1;
4382 l = (page + TARGET_PAGE_SIZE) - addr;
4383 if (l > len)
4384 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004385 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004386 if (is_write)
4387 cpu_physical_memory_write_rom(phys_addr, buf, l);
4388 else
aliguori5e2972f2009-03-28 17:51:36 +00004389 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004390 len -= l;
4391 buf += l;
4392 addr += l;
4393 }
4394 return 0;
4395}
Paul Brooka68fe892010-03-01 00:08:59 +00004396#endif
bellard13eb76e2004-01-24 15:23:36 +00004397
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
/* Called when an I/O access happens mid-TB under icount: restore the CPU
   state to the faulting instruction, then regenerate the TB so that it
   ends exactly on the I/O instruction (CF_LAST_IO), and resume execution.
   retaddr is the host return address inside the generated code. */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* Map the host code address back to the TB that contains it. */
    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    /* Instruction budget at TB entry = remaining count + TB's length. */
    n = env->icount_decr.u16.low + tb->icount;
    /* Roll CPU state back to the guest instruction at retaddr. */
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        /* Back up over the branch and refund one instruction of budget. */
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        /* Back up over the branch and refund one instruction of budget. */
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* New TB: execute exactly n insns, with the I/O insn forced last. */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    /* Drop the old TB before generating its replacement. */
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
4456
Paul Brookb3755a92010-03-12 16:54:58 +00004457#if !defined(CONFIG_USER_ONLY)
4458
Stefan Weil055403b2010-10-22 23:03:32 +02004459void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004460{
4461 int i, target_code_size, max_target_code_size;
4462 int direct_jmp_count, direct_jmp2_count, cross_page;
4463 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004464
bellarde3db7222005-01-26 22:00:47 +00004465 target_code_size = 0;
4466 max_target_code_size = 0;
4467 cross_page = 0;
4468 direct_jmp_count = 0;
4469 direct_jmp2_count = 0;
4470 for(i = 0; i < nb_tbs; i++) {
4471 tb = &tbs[i];
4472 target_code_size += tb->size;
4473 if (tb->size > max_target_code_size)
4474 max_target_code_size = tb->size;
4475 if (tb->page_addr[1] != -1)
4476 cross_page++;
4477 if (tb->tb_next_offset[0] != 0xffff) {
4478 direct_jmp_count++;
4479 if (tb->tb_next_offset[1] != 0xffff) {
4480 direct_jmp2_count++;
4481 }
4482 }
4483 }
4484 /* XXX: avoid using doubles ? */
bellard57fec1f2008-02-01 10:50:11 +00004485 cpu_fprintf(f, "Translation buffer state:\n");
Stefan Weil055403b2010-10-22 23:03:32 +02004486 cpu_fprintf(f, "gen code size %td/%ld\n",
bellard26a5f132008-05-28 12:30:31 +00004487 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4488 cpu_fprintf(f, "TB count %d/%d\n",
4489 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004490 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004491 nb_tbs ? target_code_size / nb_tbs : 0,
4492 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004493 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004494 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4495 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004496 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4497 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004498 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4499 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004500 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004501 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4502 direct_jmp2_count,
4503 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004504 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004505 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4506 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4507 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004508 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004509}
4510
bellard61382a52003-10-27 21:22:23 +00004511#define MMUSUFFIX _cmmu
4512#define GETPC() NULL
4513#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00004514#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00004515
4516#define SHIFT 0
4517#include "softmmu_template.h"
4518
4519#define SHIFT 1
4520#include "softmmu_template.h"
4521
4522#define SHIFT 2
4523#include "softmmu_template.h"
4524
4525#define SHIFT 3
4526#include "softmmu_template.h"
4527
4528#undef env
4529
4530#endif