blob: 72362810ec8e8940d110566ed58e1c0c69470e2f [file] [log] [blame]
/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
bellard67b915a2004-03-31 23:37:16 +000019#include "config.h"
bellardd5a8f072004-09-29 21:15:28 +000020#ifdef _WIN32
21#include <windows.h>
22#else
bellarda98d49b2004-11-14 16:22:05 +000023#include <sys/types.h>
bellardd5a8f072004-09-29 21:15:28 +000024#include <sys/mman.h>
25#endif
bellard54936002003-05-13 00:25:15 +000026
Stefan Weil055403b2010-10-22 23:03:32 +020027#include "qemu-common.h"
bellard6180a182003-09-30 21:04:53 +000028#include "cpu.h"
29#include "exec-all.h"
bellardb67d9a52008-05-23 09:57:34 +000030#include "tcg.h"
pbrookb3c77242008-06-30 16:31:04 +000031#include "hw/hw.h"
Alex Williamsoncc9e98c2010-06-25 11:09:43 -060032#include "hw/qdev.h"
aliguori74576192008-10-06 14:02:03 +000033#include "osdep.h"
aliguori7ba1e612008-11-05 16:04:33 +000034#include "kvm.h"
Jun Nakajima432d2682010-08-31 16:41:25 +010035#include "hw/xen.h"
Blue Swirl29e922b2010-03-29 19:24:00 +000036#include "qemu-timer.h"
pbrook53a59602006-03-25 19:31:22 +000037#if defined(CONFIG_USER_ONLY)
38#include <qemu.h>
Juergen Lockf01576f2010-03-25 22:32:16 +010039#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
40#include <sys/param.h>
41#if __FreeBSD_version >= 700104
42#define HAVE_KINFO_GETVMMAP
43#define sigqueue sigqueue_freebsd /* avoid redefinition */
44#include <sys/time.h>
45#include <sys/proc.h>
46#include <machine/profile.h>
47#define _KERNEL
48#include <sys/user.h>
49#undef _KERNEL
50#undef sigqueue
51#include <libutil.h>
52#endif
53#endif
Jun Nakajima432d2682010-08-31 16:41:25 +010054#else /* !CONFIG_USER_ONLY */
55#include "xen-mapcache.h"
Stefano Stabellini6506e4f2011-05-19 18:35:44 +010056#include "trace.h"
pbrook53a59602006-03-25 19:31:22 +000057#endif
bellard54936002003-05-13 00:25:15 +000058
bellardfd6ce8f2003-05-14 19:00:11 +000059//#define DEBUG_TB_INVALIDATE
bellard66e85a22003-06-24 13:28:12 +000060//#define DEBUG_FLUSH
bellard9fa3e852004-01-04 18:06:42 +000061//#define DEBUG_TLB
pbrook67d3b952006-12-18 05:03:52 +000062//#define DEBUG_UNASSIGNED
bellardfd6ce8f2003-05-14 19:00:11 +000063
64/* make various TB consistency checks */
ths5fafdf22007-09-16 21:08:06 +000065//#define DEBUG_TB_CHECK
66//#define DEBUG_TLB_CHECK
bellardfd6ce8f2003-05-14 19:00:11 +000067
ths1196be32007-03-17 15:17:58 +000068//#define DEBUG_IOPORT
blueswir1db7b5422007-05-26 17:36:03 +000069//#define DEBUG_SUBPAGE
ths1196be32007-03-17 15:17:58 +000070
pbrook99773bd2006-04-16 15:14:59 +000071#if !defined(CONFIG_USER_ONLY)
72/* TB consistency checks only implemented for usermode emulation. */
73#undef DEBUG_TB_CHECK
74#endif
75
bellard9fa3e852004-01-04 18:06:42 +000076#define SMC_BITMAP_USE_THRESHOLD 10
77
blueswir1bdaf78e2008-10-04 07:24:27 +000078static TranslationBlock *tbs;
Stefan Weil24ab68a2010-07-19 18:23:17 +020079static int code_gen_max_blocks;
bellard9fa3e852004-01-04 18:06:42 +000080TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
blueswir1bdaf78e2008-10-04 07:24:27 +000081static int nb_tbs;
bellardeb51d102003-05-14 21:51:13 +000082/* any access to the tbs or the page table must use this lock */
Anthony Liguoric227f092009-10-01 16:12:16 -050083spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
bellardfd6ce8f2003-05-14 19:00:11 +000084
blueswir1141ac462008-07-26 15:05:57 +000085#if defined(__arm__) || defined(__sparc_v9__)
86/* The prologue must be reachable with a direct jump. ARM and Sparc64
87 have limited branch ranges (possibly also PPC) so place it in a
blueswir1d03d8602008-07-10 17:21:31 +000088 section close to code segment. */
89#define code_gen_section \
90 __attribute__((__section__(".gen_code"))) \
91 __attribute__((aligned (32)))
Stefan Weilf8e2af12009-06-18 23:04:48 +020092#elif defined(_WIN32)
93/* Maximum alignment for Win32 is 16. */
94#define code_gen_section \
95 __attribute__((aligned (16)))
blueswir1d03d8602008-07-10 17:21:31 +000096#else
97#define code_gen_section \
98 __attribute__((aligned (32)))
99#endif
100
101uint8_t code_gen_prologue[1024] code_gen_section;
blueswir1bdaf78e2008-10-04 07:24:27 +0000102static uint8_t *code_gen_buffer;
103static unsigned long code_gen_buffer_size;
bellard26a5f132008-05-28 12:30:31 +0000104/* threshold to flush the translated code buffer */
blueswir1bdaf78e2008-10-04 07:24:27 +0000105static unsigned long code_gen_buffer_max_size;
Stefan Weil24ab68a2010-07-19 18:23:17 +0200106static uint8_t *code_gen_ptr;
bellardfd6ce8f2003-05-14 19:00:11 +0000107
pbrooke2eef172008-06-08 01:09:01 +0000108#if !defined(CONFIG_USER_ONLY)
bellard9fa3e852004-01-04 18:06:42 +0000109int phys_ram_fd;
aliguori74576192008-10-06 14:02:03 +0000110static int in_migration;
pbrook94a6b542009-04-11 17:15:54 +0000111
Alex Williamsonf471a172010-06-11 11:11:42 -0600112RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
pbrooke2eef172008-06-08 01:09:01 +0000113#endif
bellard9fa3e852004-01-04 18:06:42 +0000114
bellard6a00d602005-11-21 23:25:50 +0000115CPUState *first_cpu;
116/* current CPU in the current thread. It is only valid inside
117 cpu_exec() */
ths5fafdf22007-09-16 21:08:06 +0000118CPUState *cpu_single_env;
pbrook2e70f6e2008-06-29 01:03:05 +0000119/* 0 = Do not count executed instructions.
thsbf20dc02008-06-30 17:22:19 +0000120 1 = Precise instruction counting.
pbrook2e70f6e2008-06-29 01:03:05 +0000121 2 = Adaptive rate instruction counting. */
122int use_icount = 0;
123/* Current instruction counter. While executing translated code this may
124 include some instructions that have not yet been executed. */
125int64_t qemu_icount;
bellard6a00d602005-11-21 23:25:50 +0000126
bellard54936002003-05-13 00:25:15 +0000127typedef struct PageDesc {
bellard92e873b2004-05-21 14:52:29 +0000128 /* list of TBs intersecting this ram page */
bellardfd6ce8f2003-05-14 19:00:11 +0000129 TranslationBlock *first_tb;
bellard9fa3e852004-01-04 18:06:42 +0000130 /* in order to optimize self modifying code, we count the number
131 of lookups we do to a given page to use a bitmap */
132 unsigned int code_write_count;
133 uint8_t *code_bitmap;
134#if defined(CONFIG_USER_ONLY)
135 unsigned long flags;
136#endif
bellard54936002003-05-13 00:25:15 +0000137} PageDesc;
138
Paul Brook41c1b1c2010-03-12 16:54:58 +0000139/* In system mode we want L1_MAP to be based on ram offsets,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800140 while in user mode we want it to be based on virtual addresses. */
141#if !defined(CONFIG_USER_ONLY)
Paul Brook41c1b1c2010-03-12 16:54:58 +0000142#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
143# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
144#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800145# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
Paul Brook41c1b1c2010-03-12 16:54:58 +0000146#endif
j_mayerbedb69e2007-04-05 20:08:21 +0000147#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800148# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
j_mayerbedb69e2007-04-05 20:08:21 +0000149#endif
bellard54936002003-05-13 00:25:15 +0000150
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800151/* Size of the L2 (and L3, etc) page tables. */
152#define L2_BITS 10
bellard54936002003-05-13 00:25:15 +0000153#define L2_SIZE (1 << L2_BITS)
154
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800155/* The bits remaining after N lower levels of page tables. */
156#define P_L1_BITS_REM \
157 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
158#define V_L1_BITS_REM \
159 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
160
161/* Size of the L1 page table. Avoid silly small sizes. */
162#if P_L1_BITS_REM < 4
163#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
164#else
165#define P_L1_BITS P_L1_BITS_REM
166#endif
167
168#if V_L1_BITS_REM < 4
169#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
170#else
171#define V_L1_BITS V_L1_BITS_REM
172#endif
173
174#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
175#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
176
177#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
178#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
179
bellard83fb7ad2004-07-05 21:25:26 +0000180unsigned long qemu_real_host_page_size;
181unsigned long qemu_host_page_bits;
182unsigned long qemu_host_page_size;
183unsigned long qemu_host_page_mask;
bellard54936002003-05-13 00:25:15 +0000184
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800185/* This is a multi-level map on the virtual address space.
186 The bottom level has pointers to PageDesc. */
187static void *l1_map[V_L1_SIZE];
bellard54936002003-05-13 00:25:15 +0000188
pbrooke2eef172008-06-08 01:09:01 +0000189#if !defined(CONFIG_USER_ONLY)
Paul Brook41c1b1c2010-03-12 16:54:58 +0000190typedef struct PhysPageDesc {
191 /* offset in host memory of the page + io_index in the low bits */
192 ram_addr_t phys_offset;
193 ram_addr_t region_offset;
194} PhysPageDesc;
195
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800196/* This is a multi-level map on the physical address space.
197 The bottom level has pointers to PhysPageDesc. */
198static void *l1_phys_map[P_L1_SIZE];
Paul Brook6d9a1302010-02-28 23:55:53 +0000199
pbrooke2eef172008-06-08 01:09:01 +0000200static void io_mem_init(void);
201
bellard33417e72003-08-10 21:47:01 +0000202/* io memory support */
bellard33417e72003-08-10 21:47:01 +0000203CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
204CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
bellarda4193c82004-06-03 14:01:43 +0000205void *io_mem_opaque[IO_MEM_NB_ENTRIES];
blueswir1511d2b12009-03-07 15:32:56 +0000206static char io_mem_used[IO_MEM_NB_ENTRIES];
pbrook6658ffb2007-03-16 23:58:11 +0000207static int io_mem_watch;
208#endif
bellard33417e72003-08-10 21:47:01 +0000209
bellard34865132003-10-05 14:28:56 +0000210/* log support */
Juha Riihimäki1e8b27c2009-12-03 15:56:02 +0200211#ifdef WIN32
212static const char *logfilename = "qemu.log";
213#else
blueswir1d9b630f2008-10-05 09:57:08 +0000214static const char *logfilename = "/tmp/qemu.log";
Juha Riihimäki1e8b27c2009-12-03 15:56:02 +0200215#endif
bellard34865132003-10-05 14:28:56 +0000216FILE *logfile;
217int loglevel;
pbrooke735b912007-06-30 13:53:24 +0000218static int log_append = 0;
bellard34865132003-10-05 14:28:56 +0000219
bellarde3db7222005-01-26 22:00:47 +0000220/* statistics */
Paul Brookb3755a92010-03-12 16:54:58 +0000221#if !defined(CONFIG_USER_ONLY)
bellarde3db7222005-01-26 22:00:47 +0000222static int tlb_flush_count;
Paul Brookb3755a92010-03-12 16:54:58 +0000223#endif
bellarde3db7222005-01-26 22:00:47 +0000224static int tb_flush_count;
225static int tb_phys_invalidate_count;
226
#ifdef _WIN32
/* Make a host memory range executable (Win32 variant).  The previous
   protection is discarded; VirtualProtect requires the out-parameter. */
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
/* Make a host memory range executable (POSIX variant).  The range is
   widened to whole host pages, as mprotect() operates on full pages. */
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
252
bellardb346ff42003-06-15 20:05:50 +0000253static void page_init(void)
bellard54936002003-05-13 00:25:15 +0000254{
bellard83fb7ad2004-07-05 21:25:26 +0000255 /* NOTE: we can always suppose that qemu_host_page_size >=
bellard54936002003-05-13 00:25:15 +0000256 TARGET_PAGE_SIZE */
aliguoric2b48b62008-11-11 22:06:42 +0000257#ifdef _WIN32
258 {
259 SYSTEM_INFO system_info;
260
261 GetSystemInfo(&system_info);
262 qemu_real_host_page_size = system_info.dwPageSize;
263 }
264#else
265 qemu_real_host_page_size = getpagesize();
266#endif
bellard83fb7ad2004-07-05 21:25:26 +0000267 if (qemu_host_page_size == 0)
268 qemu_host_page_size = qemu_real_host_page_size;
269 if (qemu_host_page_size < TARGET_PAGE_SIZE)
270 qemu_host_page_size = TARGET_PAGE_SIZE;
271 qemu_host_page_bits = 0;
272 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
273 qemu_host_page_bits++;
274 qemu_host_page_mask = ~(qemu_host_page_size - 1);
balrog50a95692007-12-12 01:16:23 +0000275
Paul Brook2e9a5712010-05-05 16:32:59 +0100276#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
balrog50a95692007-12-12 01:16:23 +0000277 {
Juergen Lockf01576f2010-03-25 22:32:16 +0100278#ifdef HAVE_KINFO_GETVMMAP
279 struct kinfo_vmentry *freep;
280 int i, cnt;
281
282 freep = kinfo_getvmmap(getpid(), &cnt);
283 if (freep) {
284 mmap_lock();
285 for (i = 0; i < cnt; i++) {
286 unsigned long startaddr, endaddr;
287
288 startaddr = freep[i].kve_start;
289 endaddr = freep[i].kve_end;
290 if (h2g_valid(startaddr)) {
291 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
292
293 if (h2g_valid(endaddr)) {
294 endaddr = h2g(endaddr);
Aurelien Jarnofd436902010-04-10 17:20:36 +0200295 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
Juergen Lockf01576f2010-03-25 22:32:16 +0100296 } else {
297#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
298 endaddr = ~0ul;
Aurelien Jarnofd436902010-04-10 17:20:36 +0200299 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
Juergen Lockf01576f2010-03-25 22:32:16 +0100300#endif
301 }
302 }
303 }
304 free(freep);
305 mmap_unlock();
306 }
307#else
balrog50a95692007-12-12 01:16:23 +0000308 FILE *f;
balrog50a95692007-12-12 01:16:23 +0000309
pbrook07765902008-05-31 16:33:53 +0000310 last_brk = (unsigned long)sbrk(0);
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800311
Aurelien Jarnofd436902010-04-10 17:20:36 +0200312 f = fopen("/compat/linux/proc/self/maps", "r");
balrog50a95692007-12-12 01:16:23 +0000313 if (f) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800314 mmap_lock();
315
balrog50a95692007-12-12 01:16:23 +0000316 do {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800317 unsigned long startaddr, endaddr;
318 int n;
319
320 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
321
322 if (n == 2 && h2g_valid(startaddr)) {
323 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
324
325 if (h2g_valid(endaddr)) {
326 endaddr = h2g(endaddr);
327 } else {
328 endaddr = ~0ul;
329 }
330 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
balrog50a95692007-12-12 01:16:23 +0000331 }
332 } while (!feof(f));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800333
balrog50a95692007-12-12 01:16:23 +0000334 fclose(f);
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800335 mmap_unlock();
balrog50a95692007-12-12 01:16:23 +0000336 }
Juergen Lockf01576f2010-03-25 22:32:16 +0100337#endif
balrog50a95692007-12-12 01:16:23 +0000338 }
339#endif
bellard54936002003-05-13 00:25:15 +0000340}
341
Paul Brook41c1b1c2010-03-12 16:54:58 +0000342static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
bellard54936002003-05-13 00:25:15 +0000343{
Paul Brook41c1b1c2010-03-12 16:54:58 +0000344 PageDesc *pd;
345 void **lp;
346 int i;
347
pbrook17e23772008-06-09 13:47:45 +0000348#if defined(CONFIG_USER_ONLY)
Paul Brook2e9a5712010-05-05 16:32:59 +0100349 /* We can't use qemu_malloc because it may recurse into a locked mutex. */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800350# define ALLOC(P, SIZE) \
351 do { \
352 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
353 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800354 } while (0)
pbrook17e23772008-06-09 13:47:45 +0000355#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800356# define ALLOC(P, SIZE) \
357 do { P = qemu_mallocz(SIZE); } while (0)
pbrook17e23772008-06-09 13:47:45 +0000358#endif
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800359
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800360 /* Level 1. Always allocated. */
361 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
362
363 /* Level 2..N-1. */
364 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
365 void **p = *lp;
366
367 if (p == NULL) {
368 if (!alloc) {
369 return NULL;
370 }
371 ALLOC(p, sizeof(void *) * L2_SIZE);
372 *lp = p;
373 }
374
375 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
bellard54936002003-05-13 00:25:15 +0000376 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800377
378 pd = *lp;
379 if (pd == NULL) {
380 if (!alloc) {
381 return NULL;
382 }
383 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
384 *lp = pd;
385 }
386
387#undef ALLOC
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800388
389 return pd + (index & (L2_SIZE - 1));
bellard54936002003-05-13 00:25:15 +0000390}
391
Paul Brook41c1b1c2010-03-12 16:54:58 +0000392static inline PageDesc *page_find(tb_page_addr_t index)
bellard54936002003-05-13 00:25:15 +0000393{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800394 return page_find_alloc(index, 0);
bellard54936002003-05-13 00:25:15 +0000395}
396
Paul Brook6d9a1302010-02-28 23:55:53 +0000397#if !defined(CONFIG_USER_ONLY)
Anthony Liguoric227f092009-10-01 16:12:16 -0500398static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
bellard92e873b2004-05-21 14:52:29 +0000399{
pbrooke3f4e2a2006-04-08 20:02:06 +0000400 PhysPageDesc *pd;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800401 void **lp;
402 int i;
bellard92e873b2004-05-21 14:52:29 +0000403
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800404 /* Level 1. Always allocated. */
405 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
bellard108c49b2005-07-24 12:55:09 +0000406
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800407 /* Level 2..N-1. */
408 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
409 void **p = *lp;
410 if (p == NULL) {
411 if (!alloc) {
412 return NULL;
413 }
414 *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
415 }
416 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
bellard108c49b2005-07-24 12:55:09 +0000417 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800418
pbrooke3f4e2a2006-04-08 20:02:06 +0000419 pd = *lp;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800420 if (pd == NULL) {
pbrooke3f4e2a2006-04-08 20:02:06 +0000421 int i;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800422
423 if (!alloc) {
bellard108c49b2005-07-24 12:55:09 +0000424 return NULL;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800425 }
426
427 *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
428
pbrook67c4d232009-02-23 13:16:07 +0000429 for (i = 0; i < L2_SIZE; i++) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800430 pd[i].phys_offset = IO_MEM_UNASSIGNED;
431 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
pbrook67c4d232009-02-23 13:16:07 +0000432 }
bellard92e873b2004-05-21 14:52:29 +0000433 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800434
435 return pd + (index & (L2_SIZE - 1));
bellard92e873b2004-05-21 14:52:29 +0000436}
437
Anthony Liguoric227f092009-10-01 16:12:16 -0500438static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
bellard92e873b2004-05-21 14:52:29 +0000439{
bellard108c49b2005-07-24 12:55:09 +0000440 return phys_page_find_alloc(index, 0);
bellard92e873b2004-05-21 14:52:29 +0000441}
442
Anthony Liguoric227f092009-10-01 16:12:16 -0500443static void tlb_protect_code(ram_addr_t ram_addr);
444static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +0000445 target_ulong vaddr);
pbrookc8a706f2008-06-02 16:16:42 +0000446#define mmap_lock() do { } while(0)
447#define mmap_unlock() do { } while(0)
bellard9fa3e852004-01-04 18:06:42 +0000448#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000449
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif
462
blueswir18fcd3692008-08-17 20:26:25 +0000463static void code_gen_alloc(unsigned long tb_size)
bellard26a5f132008-05-28 12:30:31 +0000464{
bellard43694152008-05-29 09:35:57 +0000465#ifdef USE_STATIC_CODE_GEN_BUFFER
466 code_gen_buffer = static_code_gen_buffer;
467 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
468 map_exec(code_gen_buffer, code_gen_buffer_size);
469#else
bellard26a5f132008-05-28 12:30:31 +0000470 code_gen_buffer_size = tb_size;
471 if (code_gen_buffer_size == 0) {
bellard43694152008-05-29 09:35:57 +0000472#if defined(CONFIG_USER_ONLY)
473 /* in user mode, phys_ram_size is not meaningful */
474 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
475#else
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100476 /* XXX: needs adjustments */
pbrook94a6b542009-04-11 17:15:54 +0000477 code_gen_buffer_size = (unsigned long)(ram_size / 4);
bellard43694152008-05-29 09:35:57 +0000478#endif
bellard26a5f132008-05-28 12:30:31 +0000479 }
480 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
481 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
482 /* The code gen buffer location may have constraints depending on
483 the host cpu and OS */
484#if defined(__linux__)
485 {
486 int flags;
blueswir1141ac462008-07-26 15:05:57 +0000487 void *start = NULL;
488
bellard26a5f132008-05-28 12:30:31 +0000489 flags = MAP_PRIVATE | MAP_ANONYMOUS;
490#if defined(__x86_64__)
491 flags |= MAP_32BIT;
492 /* Cannot map more than that */
493 if (code_gen_buffer_size > (800 * 1024 * 1024))
494 code_gen_buffer_size = (800 * 1024 * 1024);
blueswir1141ac462008-07-26 15:05:57 +0000495#elif defined(__sparc_v9__)
496 // Map the buffer below 2G, so we can use direct calls and branches
497 flags |= MAP_FIXED;
498 start = (void *) 0x60000000UL;
499 if (code_gen_buffer_size > (512 * 1024 * 1024))
500 code_gen_buffer_size = (512 * 1024 * 1024);
balrog1cb06612008-12-01 02:10:17 +0000501#elif defined(__arm__)
balrog63d41242008-12-01 02:19:41 +0000502 /* Map the buffer below 32M, so we can use direct calls and branches */
balrog1cb06612008-12-01 02:10:17 +0000503 flags |= MAP_FIXED;
504 start = (void *) 0x01000000UL;
505 if (code_gen_buffer_size > 16 * 1024 * 1024)
506 code_gen_buffer_size = 16 * 1024 * 1024;
Richard Hendersoneba0b892010-06-04 12:14:14 -0700507#elif defined(__s390x__)
508 /* Map the buffer so that we can use direct calls and branches. */
509 /* We have a +- 4GB range on the branches; leave some slop. */
510 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
511 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
512 }
513 start = (void *)0x90000000UL;
bellard26a5f132008-05-28 12:30:31 +0000514#endif
blueswir1141ac462008-07-26 15:05:57 +0000515 code_gen_buffer = mmap(start, code_gen_buffer_size,
516 PROT_WRITE | PROT_READ | PROT_EXEC,
bellard26a5f132008-05-28 12:30:31 +0000517 flags, -1, 0);
518 if (code_gen_buffer == MAP_FAILED) {
519 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
520 exit(1);
521 }
522 }
Bradcbb608a2010-12-20 21:25:40 -0500523#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
524 || defined(__DragonFly__) || defined(__OpenBSD__)
aliguori06e67a82008-09-27 15:32:41 +0000525 {
526 int flags;
527 void *addr = NULL;
528 flags = MAP_PRIVATE | MAP_ANONYMOUS;
529#if defined(__x86_64__)
530 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
531 * 0x40000000 is free */
532 flags |= MAP_FIXED;
533 addr = (void *)0x40000000;
534 /* Cannot map more than that */
535 if (code_gen_buffer_size > (800 * 1024 * 1024))
536 code_gen_buffer_size = (800 * 1024 * 1024);
Blue Swirl4cd31ad2011-01-16 08:32:27 +0000537#elif defined(__sparc_v9__)
538 // Map the buffer below 2G, so we can use direct calls and branches
539 flags |= MAP_FIXED;
540 addr = (void *) 0x60000000UL;
541 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
542 code_gen_buffer_size = (512 * 1024 * 1024);
543 }
aliguori06e67a82008-09-27 15:32:41 +0000544#endif
545 code_gen_buffer = mmap(addr, code_gen_buffer_size,
546 PROT_WRITE | PROT_READ | PROT_EXEC,
547 flags, -1, 0);
548 if (code_gen_buffer == MAP_FAILED) {
549 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
550 exit(1);
551 }
552 }
bellard26a5f132008-05-28 12:30:31 +0000553#else
554 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
bellard26a5f132008-05-28 12:30:31 +0000555 map_exec(code_gen_buffer, code_gen_buffer_size);
556#endif
bellard43694152008-05-29 09:35:57 +0000557#endif /* !USE_STATIC_CODE_GEN_BUFFER */
bellard26a5f132008-05-28 12:30:31 +0000558 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
559 code_gen_buffer_max_size = code_gen_buffer_size -
Aurelien Jarno239fda32010-06-03 19:29:31 +0200560 (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
bellard26a5f132008-05-28 12:30:31 +0000561 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
562 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
563}
564
565/* Must be called before using the QEMU cpus. 'tb_size' is the size
566 (in bytes) allocated to the translation buffer. Zero means default
567 size. */
568void cpu_exec_init_all(unsigned long tb_size)
569{
bellard26a5f132008-05-28 12:30:31 +0000570 cpu_gen_init();
571 code_gen_alloc(tb_size);
572 code_gen_ptr = code_gen_buffer;
bellard43694152008-05-29 09:35:57 +0000573 page_init();
pbrooke2eef172008-06-08 01:09:01 +0000574#if !defined(CONFIG_USER_ONLY)
bellard26a5f132008-05-28 12:30:31 +0000575 io_mem_init();
pbrooke2eef172008-06-08 01:09:01 +0000576#endif
Richard Henderson9002ec72010-05-06 08:50:41 -0700577#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
578 /* There's no guest base to take into account, so go ahead and
579 initialize the prologue now. */
580 tcg_prologue_init(&tcg_ctx);
581#endif
bellard26a5f132008-05-28 12:30:31 +0000582}
583
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

/* vmstate post-load hook: sanitize state restored from an older
   snapshot and drop stale TLB contents. */
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

/* Migration/savevm description of the architecture-independent part of
   a CPU's state. */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
611
Glauber Costa950f1472009-06-09 12:15:18 -0400612CPUState *qemu_get_cpu(int cpu)
613{
614 CPUState *env = first_cpu;
615
616 while (env) {
617 if (env->cpu_index == cpu)
618 break;
619 env = env->next_cpu;
620 }
621
622 return env;
623}
624
bellard6a00d602005-11-21 23:25:50 +0000625void cpu_exec_init(CPUState *env)
bellardfd6ce8f2003-05-14 19:00:11 +0000626{
bellard6a00d602005-11-21 23:25:50 +0000627 CPUState **penv;
628 int cpu_index;
629
pbrookc2764712009-03-07 15:24:59 +0000630#if defined(CONFIG_USER_ONLY)
631 cpu_list_lock();
632#endif
bellard6a00d602005-11-21 23:25:50 +0000633 env->next_cpu = NULL;
634 penv = &first_cpu;
635 cpu_index = 0;
636 while (*penv != NULL) {
Nathan Froyd1e9fa732009-06-03 11:33:08 -0700637 penv = &(*penv)->next_cpu;
bellard6a00d602005-11-21 23:25:50 +0000638 cpu_index++;
639 }
640 env->cpu_index = cpu_index;
aliguori268a3622009-04-21 22:30:27 +0000641 env->numa_node = 0;
Blue Swirl72cf2d42009-09-12 07:36:22 +0000642 QTAILQ_INIT(&env->breakpoints);
643 QTAILQ_INIT(&env->watchpoints);
Jan Kiszkadc7a09c2011-03-15 12:26:31 +0100644#ifndef CONFIG_USER_ONLY
645 env->thread_id = qemu_get_thread_id();
646#endif
bellard6a00d602005-11-21 23:25:50 +0000647 *penv = env;
pbrookc2764712009-03-07 15:24:59 +0000648#if defined(CONFIG_USER_ONLY)
649 cpu_list_unlock();
650#endif
pbrookb3c77242008-06-30 16:31:04 +0000651#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
Alex Williamson0be71e32010-06-25 11:09:07 -0600652 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
653 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
pbrookb3c77242008-06-30 16:31:04 +0000654 cpu_save, cpu_load, env);
655#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000656}
657
Tristan Gingoldd1a1eb72011-02-10 10:04:57 +0100658/* Allocate a new translation block. Flush the translation buffer if
659 too many translation blocks or too much generated code. */
660static TranslationBlock *tb_alloc(target_ulong pc)
661{
662 TranslationBlock *tb;
663
664 if (nb_tbs >= code_gen_max_blocks ||
665 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
666 return NULL;
667 tb = &tbs[nb_tbs++];
668 tb->pc = pc;
669 tb->cflags = 0;
670 return tb;
671}
672
673void tb_free(TranslationBlock *tb)
674{
675 /* In practice this is mostly used for single use temporary TB
676 Ignore the hard cases and just back up if this TB happens to
677 be the last one generated. */
678 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
679 code_gen_ptr = tb->tc_ptr;
680 nb_tbs--;
681 }
682}
683
bellard9fa3e852004-01-04 18:06:42 +0000684static inline void invalidate_page_bitmap(PageDesc *p)
685{
686 if (p->code_bitmap) {
bellard59817cc2004-02-16 22:01:13 +0000687 qemu_free(p->code_bitmap);
bellard9fa3e852004-01-04 18:06:42 +0000688 p->code_bitmap = NULL;
689 }
690 p->code_write_count = 0;
691}
692
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800693/* Set to NULL all the 'first_tb' fields in all PageDescs. */
694
695static void page_flush_tb_1 (int level, void **lp)
696{
697 int i;
698
699 if (*lp == NULL) {
700 return;
701 }
702 if (level == 0) {
703 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +0000704 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800705 pd[i].first_tb = NULL;
706 invalidate_page_bitmap(pd + i);
707 }
708 } else {
709 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +0000710 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800711 page_flush_tb_1 (level - 1, pp + i);
712 }
713 }
714}
715
bellardfd6ce8f2003-05-14 19:00:11 +0000716static void page_flush_tb(void)
717{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800718 int i;
719 for (i = 0; i < V_L1_SIZE; i++) {
720 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
bellardfd6ce8f2003-05-14 19:00:11 +0000721 }
722}
723
724/* flush all the translation blocks */
bellardd4e81642003-05-25 16:46:15 +0000725/* XXX: tb_flush is currently not thread safe */
/* Discard every translation block: reset the TB array and the code
   generation buffer, and clear every structure that may still point
   at a discarded TB (per-CPU jump caches, the physical hash table
   and the per-page TB lists). */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* the generator must never have written past the end of the buffer */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* drop every CPU's virtual-PC -> TB lookup cache */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* rewind the code buffer so generation restarts from scratch */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
752
753#ifdef DEBUG_TB_CHECK
754
/* DEBUG_TB_CHECK only: verify that no TB in the physical hash table
   still intersects the page containing 'address' (i.e. that
   invalidation of that page really removed everything). */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    /* scan every bucket of the physical hash table */
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
771
772/* verify that all the pages have correct rights for code */
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* check the flags of both pages a TB may span: a page
               holding translated code must not be writable */
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
789
790#endif
791
792/* invalidate one TB */
793static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
794 int next_offset)
795{
796 TranslationBlock *tb1;
797 for(;;) {
798 tb1 = *ptb;
799 if (tb1 == tb) {
800 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
801 break;
802 }
803 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
804 }
805}
806
bellard9fa3e852004-01-04 18:06:42 +0000807static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
808{
809 TranslationBlock *tb1;
810 unsigned int n1;
811
812 for(;;) {
813 tb1 = *ptb;
814 n1 = (long)tb1 & 3;
815 tb1 = (TranslationBlock *)((long)tb1 & ~3);
816 if (tb1 == tb) {
817 *ptb = tb1->page_next[n1];
818 break;
819 }
820 ptb = &tb1->page_next[n1];
821 }
822}
823
/* Remove jump slot 'n' of 'tb' from the circular list of TBs that
   jump to the same destination TB.  List pointers carry the jump
   slot index in their low 2 bits; the tag value 2 marks the
   destination TB's own jmp_first anchor.
   NOTE(review): the (long) pointer-tag casts truncate on LLP64
   hosts (Win64); uintptr_t would be portable. */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* reached the anchor: continue through jmp_first */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
851
852/* reset the jump entry 'n' of a TB so that it is not chained to
853 another TB */
854static inline void tb_reset_jump(TranslationBlock *tb, int n)
855{
856 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
857}
858
/* Invalidate a single TB: unlink it from the physical hash table,
   from the lists of the (one or two) pages it spans, from every
   CPU's jump cache, and break all jump chaining to and from it.
   'page_addr' names a page whose PageDesc the caller is already
   handling itself (or -1). */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's virtual-PC jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB: walk the circular
       list anchored at jmp_first (tag 2 marks the anchor) and reset
       each incoming jump to its fall-through path */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
914
/* Set bits [start, start+len) in the bitmap 'tab' (bit i of the
   bitmap is bit (i & 7) of byte tab[i >> 3]).  A zero-length range
   sets nothing. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *p = tab + (start >> 3);
    int head = 0xff << (start & 7);

    if ((start & ~7) == (end & ~7)) {
        /* the whole range lies within a single byte */
        if (start < end) {
            *p |= head & ~(0xff << (end & 7));
        }
        return;
    }

    /* partial leading byte */
    *p++ |= head;
    start = (start + 8) & ~7;

    /* full bytes in the middle */
    while (start < (end & ~7)) {
        *p++ = 0xff;
        start += 8;
    }

    /* partial trailing byte, if any */
    if (start < end) {
        *p |= ~(0xff << (end & 7));
    }
}
941
/* Build the SMC code bitmap of a page: one bit per byte of the
   page, set wherever the byte is covered by at least one TB.  Used
   by tb_invalidate_phys_page_fast() to skip writes that do not
   touch translated code. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    /* TARGET_PAGE_SIZE bits, zero-initialized */
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the list pointer select which of the TB's
           (up to two) pages this page is */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of the TB: it covers [0, end-of-TB) here */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
969
/* Translate a new TB for guest code at (pc, cs_base, flags) and
   link it into the page tables.  'cflags' is stored into the TB
   (e.g. CF_COUNT_MASK-limited single-instruction blocks).  Flushes
   all TBs and retries if allocation fails. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    /* generated host code is appended at the current buffer position */
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the buffer pointer, keeping CODE_GEN_ALIGN alignment */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed: a TB may span two guest pages */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
ths3b46e622007-09-17 08:09:54 +00001007
bellard9fa3e852004-01-04 18:06:42 +00001008/* invalidate all TBs which intersect with the target physical page
1009 starting in range [start;end[. NOTE: start and end must refer to
bellardd720b932004-04-25 17:57:43 +00001010 the same physical page. 'is_cpu_write_access' should be true if called
1011 from a real cpu write access: the virtual CPU will exit the current
1012 TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env; /* may be NULL; checked before use below */
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    /* only look for the currently executing TB when this is a real
       guest write (we then have a valid mem_io_pc to locate it) */
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* start maintaining a fine-grained code bitmap once the page has
       taken enough code-write faults to make it worthwhile */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the list pointer select the TB's page slot */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                /* deliver any interrupt that arrived while current_tb
                   was cleared */
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
1117
1118/* len must be <= 8 and start must be a multiple of len */
/* Fast-path invalidation for a small guest write: when the page has
   a code bitmap, only fall back to the full (slow) range
   invalidation if one of the written bytes actually overlaps
   translated code. */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        /* extract the 'len' bitmap bits starting at the write offset */
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1144
bellard9fa3e852004-01-04 18:06:42 +00001145#if !defined(CONFIG_SOFTMMU)
/* User-mode (no softmmu) variant: invalidate every TB on the page
   containing 'addr'.  'pc'/'puc' come from the write-fault signal
   handler and allow precise self-modifying-code handling. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* locate the TB we were executing from the faulting host pc */
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        /* low 2 bits of the list pointer select the TB's page slot */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
bellard9fa3e852004-01-04 18:06:42 +00001204#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001205
1206/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001207static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001208 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001209{
1210 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001211#ifndef CONFIG_USER_ONLY
1212 bool page_already_protected;
1213#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001214
bellard9fa3e852004-01-04 18:06:42 +00001215 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001216 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001217 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001218#ifndef CONFIG_USER_ONLY
1219 page_already_protected = p->first_tb != NULL;
1220#endif
bellard9fa3e852004-01-04 18:06:42 +00001221 p->first_tb = (TranslationBlock *)((long)tb | n);
1222 invalidate_page_bitmap(p);
1223
bellard107db442004-06-22 18:48:46 +00001224#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001225
bellard9fa3e852004-01-04 18:06:42 +00001226#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001227 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001228 target_ulong addr;
1229 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001230 int prot;
1231
bellardfd6ce8f2003-05-14 19:00:11 +00001232 /* force the host page as non writable (writes will have a
1233 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001234 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001235 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001236 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1237 addr += TARGET_PAGE_SIZE) {
1238
1239 p2 = page_find (addr >> TARGET_PAGE_BITS);
1240 if (!p2)
1241 continue;
1242 prot |= p2->flags;
1243 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001244 }
ths5fafdf22007-09-16 21:08:06 +00001245 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001246 (prot & PAGE_BITS) & ~PAGE_WRITE);
1247#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001248 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001249 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001250#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001251 }
bellard9fa3e852004-01-04 18:06:42 +00001252#else
1253 /* if some code is already present, then the pages are already
1254 protected. So we handle the case where only the first TB is
1255 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001256 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001257 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001258 }
1259#endif
bellardd720b932004-04-25 17:57:43 +00001260
1261#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001262}
1263
bellard9fa3e852004-01-04 18:06:42 +00001264/* add a new TB and link it to the physical page tables. phys_page2 is
1265 (-1) to indicate that only one page contains the TB. */
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table (prepend to the bucket) */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    /* empty circular jump list: the anchor is the TB itself, tag 2 */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses (0xffff marks an unused slot) */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1303
bellarda513fe12003-05-27 23:29:48 +00001304/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1305 tb[1].tc_ptr. Return NULL if not found */
1306TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1307{
1308 int m_min, m_max, m;
1309 unsigned long v;
1310 TranslationBlock *tb;
1311
1312 if (nb_tbs <= 0)
1313 return NULL;
1314 if (tc_ptr < (unsigned long)code_gen_buffer ||
1315 tc_ptr >= (unsigned long)code_gen_ptr)
1316 return NULL;
1317 /* binary search (cf Knuth) */
1318 m_min = 0;
1319 m_max = nb_tbs - 1;
1320 while (m_min <= m_max) {
1321 m = (m_min + m_max) >> 1;
1322 tb = &tbs[m];
1323 v = (unsigned long)tb->tc_ptr;
1324 if (v == tc_ptr)
1325 return tb;
1326 else if (tc_ptr < v) {
1327 m_max = m - 1;
1328 } else {
1329 m_min = m + 1;
1330 }
ths5fafdf22007-09-16 21:08:06 +00001331 }
bellarda513fe12003-05-27 23:29:48 +00001332 return &tbs[m_max];
1333}
bellard75012672003-06-21 13:11:07 +00001334
bellardea041c02003-06-25 16:16:50 +00001335static void tb_reset_jump_recursive(TranslationBlock *tb);
1336
/* Break the chaining of jump slot 'n' of 'tb': find the destination
   TB via the tagged circular list, unlink tb from that list, reset
   the jump to its fall-through path, then recurse into the
   destination TB so its own outgoing chains are broken too. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list (the anchor entry is tagged with 2) */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1375
1376static void tb_reset_jump_recursive(TranslationBlock *tb)
1377{
1378 tb_reset_jump_recursive2(tb, 0);
1379 tb_reset_jump_recursive2(tb, 1);
1380}
1381
bellard1fddef42005-04-17 19:16:13 +00001382#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001383#if defined(CONFIG_USER_ONLY)
/* User-mode: invalidate the TB covering 'pc' so it is retranslated
   (picking up the new breakpoint) on next execution.  In user mode
   the guest virtual address is used directly as the page address. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
1388#else
bellardd720b932004-04-25 17:57:43 +00001389static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1390{
Anthony Liguoric227f092009-10-01 16:12:16 -05001391 target_phys_addr_t addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00001392 target_ulong pd;
Anthony Liguoric227f092009-10-01 16:12:16 -05001393 ram_addr_t ram_addr;
pbrookc2f07f82006-04-08 17:14:56 +00001394 PhysPageDesc *p;
bellardd720b932004-04-25 17:57:43 +00001395
pbrookc2f07f82006-04-08 17:14:56 +00001396 addr = cpu_get_phys_page_debug(env, pc);
1397 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1398 if (!p) {
1399 pd = IO_MEM_UNASSIGNED;
1400 } else {
1401 pd = p->phys_offset;
1402 }
1403 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001404 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001405}
bellardc27004e2005-01-03 23:35:10 +00001406#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001407#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001408
Paul Brookc527ee82010-03-01 03:31:14 +00001409#if defined(CONFIG_USER_ONLY)
1410void cpu_watchpoint_remove_all(CPUState *env, int mask)
1411
1412{
1413}
1414
/* User-mode emulation has no watchpoint support. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
1420#else
pbrook6658ffb2007-03-16 23:58:11 +00001421/* Add a watchpoint. */
/* Add a watchpoint.  'len' must be a power of two (1/2/4/8) and
   'addr' must be aligned to it.  On success the new watchpoint is
   optionally returned through '*watchpoint'.  Returns 0 or -EINVAL. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    /* e.g. len == 4 gives len_mask == ~3: masks the in-range bits */
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    /* drop any cached TLB entry so accesses to this page trap again */
    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
1452
aliguoria1d1bb32008-11-18 20:07:32 +00001453/* Remove a specific watchpoint. */
/* Remove a specific watchpoint, matched by address, length and
   flags (ignoring the transient BP_WATCHPOINT_HIT bit).
   Returns 0 on success, -ENOENT if no such watchpoint exists. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}
1469
aliguoria1d1bb32008-11-18 20:07:32 +00001470/* Remove a specific watchpoint by reference. */
1471void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1472{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001473 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001474
aliguoria1d1bb32008-11-18 20:07:32 +00001475 tlb_flush_page(env, watchpoint->vaddr);
1476
1477 qemu_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001478}
1479
aliguoria1d1bb32008-11-18 20:07:32 +00001480/* Remove all matching watchpoints. */
1481void cpu_watchpoint_remove_all(CPUState *env, int mask)
1482{
aliguoric0ce9982008-11-25 22:13:57 +00001483 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001484
Blue Swirl72cf2d42009-09-12 07:36:22 +00001485 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001486 if (wp->flags & mask)
1487 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001488 }
aliguoria1d1bb32008-11-18 20:07:32 +00001489}
Paul Brookc527ee82010-03-01 03:31:14 +00001490#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001491
1492/* Add a breakpoint. */
1493int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1494 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001495{
bellard1fddef42005-04-17 19:16:13 +00001496#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001497 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001498
aliguoria1d1bb32008-11-18 20:07:32 +00001499 bp = qemu_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001500
1501 bp->pc = pc;
1502 bp->flags = flags;
1503
aliguori2dc9f412008-11-18 20:56:59 +00001504 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001505 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001506 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001507 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001508 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001509
1510 breakpoint_invalidate(env, pc);
1511
1512 if (breakpoint)
1513 *breakpoint = bp;
1514 return 0;
1515#else
1516 return -ENOSYS;
1517#endif
1518}
1519
1520/* Remove a specific breakpoint. */
1521int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1522{
1523#if defined(TARGET_HAS_ICE)
1524 CPUBreakpoint *bp;
1525
Blue Swirl72cf2d42009-09-12 07:36:22 +00001526 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001527 if (bp->pc == pc && bp->flags == flags) {
1528 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001529 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001530 }
bellard4c3a88a2003-07-26 12:06:08 +00001531 }
aliguoria1d1bb32008-11-18 20:07:32 +00001532 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001533#else
aliguoria1d1bb32008-11-18 20:07:32 +00001534 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001535#endif
1536}
1537
aliguoria1d1bb32008-11-18 20:07:32 +00001538/* Remove a specific breakpoint by reference. */
1539void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001540{
bellard1fddef42005-04-17 19:16:13 +00001541#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001542 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001543
aliguoria1d1bb32008-11-18 20:07:32 +00001544 breakpoint_invalidate(env, breakpoint->pc);
1545
1546 qemu_free(breakpoint);
1547#endif
1548}
1549
1550/* Remove all matching breakpoints. */
1551void cpu_breakpoint_remove_all(CPUState *env, int mask)
1552{
1553#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001554 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001555
Blue Swirl72cf2d42009-09-12 07:36:22 +00001556 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001557 if (bp->flags & mask)
1558 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001559 }
bellard4c3a88a2003-07-26 12:06:08 +00001560#endif
1561}
1562
bellardc33a3462003-07-29 20:50:33 +00001563/* enable or disable single step mode. EXCP_DEBUG is returned by the
1564 CPU loop after each instruction */
1565void cpu_single_step(CPUState *env, int enabled)
1566{
bellard1fddef42005-04-17 19:16:13 +00001567#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001568 if (env->singlestep_enabled != enabled) {
1569 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001570 if (kvm_enabled())
1571 kvm_update_guest_debug(env, 0);
1572 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001573 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001574 /* XXX: only flush what is necessary */
1575 tb_flush(env);
1576 }
bellardc33a3462003-07-29 20:50:33 +00001577 }
1578#endif
1579}
1580
/* enable or disable low levels log */
/* Sets the global log mask and (lazily) opens or closes the log file.
   The very first open truncates ("w"); log_append is then set so any
   later reopen appends instead of truncating. */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            /* Cannot log anywhere; report on stderr and hard-exit
               without running atexit handlers. */
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    /* Disabling all logging closes the file so it can be inspected. */
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
1608
/* Switch logging to @filename: remember a private copy of the name,
   close any currently open log file and reopen via cpu_set_log() with
   the current log level.
   NOTE(review): the previous logfilename pointer is overwritten without
   being freed — presumably because it may point at a static default;
   repeated calls leak one strdup'd string each. */
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
bellardc33a3462003-07-29 20:50:33 +00001618
/* Detach the CPU from the translation block it is currently executing:
   clear env->current_tb and recursively break the TB's chained jumps so
   execution falls back to the main loop.  Serialized by a local
   spinlock because multiple callers may race on env->current_tb. */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
1638
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001639#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
/* TCG implementation of cpu_interrupt: record the pending interrupt
   bits and make sure the target CPU notices them promptly. */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        /* Force the icount counter to expire so the CPU leaves the
           translated-code loop at the next instruction boundary. */
        env->icount_decr.u16.high = 0xffff;
        /* Raising a *new* interrupt (bits not in old_mask) outside an
           I/O instruction would break deterministic icount replay. */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        /* Break TB chaining so the main loop sees interrupt_request. */
        cpu_unlink_tb(env);
    }
}
1667
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001668CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1669
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001670#else /* CONFIG_USER_ONLY */
1671
/* User-mode emulation variant: just record the pending bits and break
   TB chaining; there is no iothread or icount to consider. */
void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
1677#endif /* CONFIG_USER_ONLY */
1678
/* Clear the given bits from the CPU's pending interrupt mask. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1683
/* Request that the CPU leave its execution loop: set exit_request and
   break TB chaining so the flag is observed promptly. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
1689
/* Table mapping each log-mask bit to its command-line name and help
   text; terminated by a zero-mask sentinel entry.  Consumed by
   cpu_str_to_log_mask() below. */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      /* help string is assembled across an #ifdef: x86 gets an extra
         clause about eflags optimization */
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1721
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001722#ifndef CONFIG_USER_ONLY
/* Registered physical-memory clients; each is notified of mapping
   changes and dirty-logging events via the cpu_notify_* helpers. */
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);
1725
1726static void cpu_notify_set_memory(target_phys_addr_t start_addr,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001727 ram_addr_t size,
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03001728 ram_addr_t phys_offset,
1729 bool log_dirty)
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001730{
1731 CPUPhysMemoryClient *client;
1732 QLIST_FOREACH(client, &memory_client_list, list) {
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03001733 client->set_memory(client, start_addr, size, phys_offset, log_dirty);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001734 }
1735}
1736
1737static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001738 target_phys_addr_t end)
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001739{
1740 CPUPhysMemoryClient *client;
1741 QLIST_FOREACH(client, &memory_client_list, list) {
1742 int r = client->sync_dirty_bitmap(client, start, end);
1743 if (r < 0)
1744 return r;
1745 }
1746 return 0;
1747}
1748
1749static int cpu_notify_migration_log(int enable)
1750{
1751 CPUPhysMemoryClient *client;
1752 QLIST_FOREACH(client, &memory_client_list, list) {
1753 int r = client->migration_log(client, enable);
1754 if (r < 0)
1755 return r;
1756 }
1757 return 0;
1758}
1759
/* Accumulator for the physical page walk below: adjacent pages whose
   phys_offset is contiguous are merged into a single set_memory call.
   size == 0 means no range is pending. */
struct last_map {
    target_phys_addr_t start_addr;
    ram_addr_t size;
    ram_addr_t phys_offset;
};
1765
/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
 * address. Each intermediate table provides the next L2_BITs of guest
 * physical address space. The number of levels vary based on host and
 * guest configuration, making it efficient to build the final guest
 * physical address by seeding the L1 offset and shifting and adding in
 * each L2 offset as we recurse through them. */
/* Recursive worker for phys_page_for_each(): walks one (sub)table at
 * the given level, reporting assigned pages to @client and coalescing
 * contiguous ranges via @map.  The final pending range in @map is NOT
 * flushed here; the caller emits it. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
                                 void **lp, target_phys_addr_t addr,
                                 struct last_map *map)
{
    int i;

    /* Unpopulated subtree: nothing mapped here. */
    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        /* Leaf level: an array of page descriptors. */
        PhysPageDesc *pd = *lp;
        /* Convert the accumulated table index into a guest physical
           base address for this leaf table. */
        addr <<= L2_BITS + TARGET_PAGE_BITS;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;

                /* Extend the pending range when both guest address and
                   phys_offset continue contiguously. */
                if (map->size &&
                    start_addr == map->start_addr + map->size &&
                    pd[i].phys_offset == map->phys_offset + map->size) {

                    map->size += TARGET_PAGE_SIZE;
                    continue;
                } else if (map->size) {
                    /* Discontinuity: emit the completed range first. */
                    client->set_memory(client, map->start_addr,
                                       map->size, map->phys_offset, false);
                }

                /* Start a new pending range at this page. */
                map->start_addr = start_addr;
                map->size = TARGET_PAGE_SIZE;
                map->phys_offset = pd[i].phys_offset;
            }
        }
    } else {
        /* Interior level: recurse into each child table, shifting the
           partial address left by L2_BITS and adding the child index. */
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i,
                                 (addr << L2_BITS) | i, map);
        }
    }
}
1812
1813static void phys_page_for_each(CPUPhysMemoryClient *client)
1814{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001815 int i;
Alex Williamson2173a752011-05-03 12:36:58 -06001816 struct last_map map = { };
1817
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001818 for (i = 0; i < P_L1_SIZE; ++i) {
1819 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
Alex Williamson2173a752011-05-03 12:36:58 -06001820 l1_phys_map + i, i, &map);
1821 }
1822 if (map.size) {
1823 client->set_memory(client, map.start_addr, map.size, map.phys_offset,
1824 false);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001825 }
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001826}
1827
/* Register a new physical-memory client and immediately replay the
   current memory map to it so it starts fully synchronized. */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1833
/* Unregister a physical-memory client; the client owns its storage. */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
1838#endif
1839
/* Return non-zero iff the n bytes starting at s1 spell exactly the
   NUL-terminated string s2 (i.e. s2 has length n and matches). */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
ths3b46e622007-09-17 08:09:54 +00001846
/* takes a comma separated list of log masks. Return 0 if error. */
/* Each item is matched against cpu_log_items[].name; the special item
   "all" ORs in every mask.  An unknown item aborts with 0. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        /* p..p1 delimits the current comma-separated item. */
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
        /* NOTE: the "all" branch falls through to this label with item
           pointing at the zero-mask sentinel, so the extra OR below is
           a harmless no-op in that case. */
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
bellardea041c02003-06-25 16:16:50 +00001879
/* Report a fatal emulation error: print the formatted message and CPU
   state to stderr and (if enabled) to the qemu log, then abort().
   Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    /* Second copy of the argument list: the first vfprintf consumes
       ap, so the log path needs its own. */
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* Reset SIGABRT to its default disposition before aborting —
           presumably so a handler installed for the guest cannot
           intercept the abort() below. */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
1919
thsc5be9f02007-02-28 20:20:53 +00001920CPUState *cpu_copy(CPUState *env)
1921{
ths01ba9812007-12-09 02:22:57 +00001922 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001923 CPUState *next_cpu = new_env->next_cpu;
1924 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001925#if defined(TARGET_HAS_ICE)
1926 CPUBreakpoint *bp;
1927 CPUWatchpoint *wp;
1928#endif
1929
thsc5be9f02007-02-28 20:20:53 +00001930 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001931
1932 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001933 new_env->next_cpu = next_cpu;
1934 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001935
1936 /* Clone all break/watchpoints.
1937 Note: Once we support ptrace with hw-debug register access, make sure
1938 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001939 QTAILQ_INIT(&env->breakpoints);
1940 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001941#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001942 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001943 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1944 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001945 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001946 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1947 wp->flags, NULL);
1948 }
1949#endif
1950
thsc5be9f02007-02-28 20:20:53 +00001951 return new_env;
1952}
1953
bellard01243112004-01-04 15:48:17 +00001954#if !defined(CONFIG_USER_ONLY)
1955
edgar_igl5c751e92008-05-06 08:44:21 +00001956static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1957{
1958 unsigned int i;
1959
1960 /* Discard jump cache entries for any tb which might potentially
1961 overlap the flushed page. */
1962 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1963 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001964 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001965
1966 i = tb_jmp_cache_hash_page(addr);
1967 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001968 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001969}
1970
/* Canonical invalid TLB entry; assigned wholesale when flushing. */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};
1977
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
/* Invalidate the entire software TLB for all MMU modes, clear the
   TB jump cache, and reset the large-page flush tracking range. */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    /* No large pages mapped any more; see tlb_add_large_page(). */
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
2004
bellard274da6b2004-05-20 21:56:27 +00002005static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00002006{
ths5fafdf22007-09-16 21:08:06 +00002007 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00002008 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00002009 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00002010 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00002011 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00002012 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04002013 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00002014 }
bellard61382a52003-10-27 21:22:23 +00002015}
2016
/* Invalidate all TLB entries (every MMU mode) for the page containing
   @addr, plus the corresponding TB jump-cache buckets.  Falls back to
   a full flush if the page lies inside a tracked large-page range. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* A page maps to exactly one TLB slot per MMU mode. */
    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
2046
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
/* Implemented by clearing the page's CODE_DIRTY_FLAG, which forces
   writes back through the slow path. */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
2055
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
/* Note: the env and vaddr parameters are currently unused. */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
2063
ths5fafdf22007-09-16 21:08:06 +00002064static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00002065 unsigned long start, unsigned long length)
2066{
2067 unsigned long addr;
bellard84b7b8e2005-11-28 21:19:04 +00002068 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2069 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00002070 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00002071 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00002072 }
2073 }
2074}
2075
/* Note: start and end must be within the same ram block. */
/* Clear @dirty_flags for the RAM range [start, end) and poison the
   write TLB entries of every CPU covering that range so the next write
   re-dirties it. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }

    /* Walk every TLB slot of every MMU mode on every CPU. */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
2111
aliguori74576192008-10-06 14:02:03 +00002112int cpu_physical_memory_set_dirty_tracking(int enable)
2113{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002114 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002115 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002116 ret = cpu_notify_migration_log(!!enable);
2117 return ret;
aliguori74576192008-10-06 14:02:03 +00002118}
2119
/* Return the dirty-tracking state last set by
   cpu_physical_memory_set_dirty_tracking(). */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
2124
Anthony Liguoric227f092009-10-01 16:12:16 -05002125int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2126 target_phys_addr_t end_addr)
aliguori2bec46d2008-11-24 20:21:41 +00002127{
Michael S. Tsirkin7b8f3b72010-01-27 22:07:21 +02002128 int ret;
Jan Kiszka151f7742009-05-01 20:52:47 +02002129
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002130 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
Jan Kiszka151f7742009-05-01 20:52:47 +02002131 return ret;
aliguori2bec46d2008-11-24 20:21:41 +00002132}
2133
Anthony PERARDe5896b12011-02-07 12:19:23 +01002134int cpu_physical_log_start(target_phys_addr_t start_addr,
2135 ram_addr_t size)
2136{
2137 CPUPhysMemoryClient *client;
2138 QLIST_FOREACH(client, &memory_client_list, list) {
2139 if (client->log_start) {
2140 int r = client->log_start(client, start_addr, size);
2141 if (r < 0) {
2142 return r;
2143 }
2144 }
2145 }
2146 return 0;
2147}
2148
2149int cpu_physical_log_stop(target_phys_addr_t start_addr,
2150 ram_addr_t size)
2151{
2152 CPUPhysMemoryClient *client;
2153 QLIST_FOREACH(client, &memory_client_list, list) {
2154 if (client->log_stop) {
2155 int r = client->log_stop(client, start_addr, size);
2156 if (r < 0) {
2157 return r;
2158 }
2159 }
2160 }
2161 return 0;
2162}
2163
/* Re-check one write TLB entry against the dirty bitmap: if its page
   is no longer dirty, mark the entry TLB_NOTDIRTY so the next write
   goes through the slow path. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* Recover the host pointer, then map it back to a ram_addr_t
           for the dirty-bitmap lookup. */
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
2178
/* update the TLB according to the current state of the dirty bits:
   walk every entry of every MMU mode and refresh its NOTDIRTY flag
   via tlb_update_dirty().  */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}
2189
pbrook0f459d12008-06-09 00:20:13 +00002190static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002191{
pbrook0f459d12008-06-09 00:20:13 +00002192 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2193 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002194}
2195
pbrook0f459d12008-06-09 00:20:13 +00002196/* update the TLB corresponding to virtual page vaddr
2197 so that it is no longer dirty */
2198static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002199{
bellard1ccde1c2004-02-06 19:46:14 +00002200 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002201 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002202
pbrook0f459d12008-06-09 00:20:13 +00002203 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002204 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002205 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2206 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002207}
2208
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    /* size is expected to be a power of two, so ~(size - 1) masks off
       the offset bits within the large page.  */
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        /* No large page tracked yet: start tracking this one.  */
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    /* Widen the mask until both pages fall inside one aligned region.  */
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
2231
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page. */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;          /* phys_offset of the target page (or IO id) */
    unsigned int index;        /* slot in the direct-mapped TLB */
    target_ulong address;      /* value stored in addr_read/addr_write */
    target_ulong code_address; /* value stored in addr_code */
    unsigned long addend;      /* host - guest address delta for RAM */
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;  /* what the I/O slow path will use */

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        /* Record the large page so invalidation can flush it fully.  */
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM. */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    /* iotlb/addend are stored relative to vaddr so the fast path can
       add the access address directly.  */
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;  /* -1 never matches a page-aligned vaddr */
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* Clean RAM: route writes through the slow path so the
               page can be marked dirty (and TBs invalidated).  */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
2337
bellard01243112004-01-04 15:48:17 +00002338#else
2339
bellardee8b7022004-02-03 23:35:10 +00002340void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002341{
2342}
2343
bellard2e126692004-04-25 21:28:44 +00002344void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002345{
2346}
2347
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

/* Accumulator for the walk: 'start' is the begin address of the
   currently open region (-1ul when none is open) and 'prot' its
   protection flags.  A region is emitted when the flags change.  */
struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;            /* opaque argument forwarded to fn */
    unsigned long start;
    int prot;
};
bellard9fa3e852004-01-04 18:06:42 +00002360
/* Close the currently open region (if any) at address 'end' by
   invoking the callback, then open a new region starting at 'end'
   with flags 'new_prot' (or no region when new_prot is 0).  Returns
   the callback's non-zero result to abort the walk, else 0.  */
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}
2376
/* Recursive worker for walk_memory_regions: descends one level of the
   page-descriptor radix tree rooted at *lp, where 'base' is the guest
   address covered by this subtree.  Level 0 entries are PageDesc
   leaves; higher levels are arrays of child pointers.  Returns the
   first non-zero callback result, else 0.  */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        /* Empty subtree: close any open region at 'base'.  */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            /* Only report a boundary when the protection changes.  */
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
2414
/* Walk all mapped guest memory regions, calling fn(priv, start, end,
   prot) for each maximal run of pages sharing the same protection.
   Returns the first non-zero value returned by fn, else 0.  */
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;   /* no region open yet */
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    /* Flush the final open region, if any.  */
    return walk_memory_regions_end(&data, 0, 0);
}
2435
/* walk_memory_regions callback: print one region as
   "start-end size rwx" to the FILE* passed via priv.  */
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    /* Always continue the walk.  */
    return (0);
}
2450
/* dump memory mappings: print a header line, then one line per
   region via the dump_region callback.  */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
2458
pbrook53a59602006-03-25 19:31:22 +00002459int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002460{
bellard9fa3e852004-01-04 18:06:42 +00002461 PageDesc *p;
2462
2463 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002464 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002465 return 0;
2466 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002467}
2468
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        /* Remember that the page was originally writable even if we
           later write-protect it to guard translated code.  */
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
2506
ths3d97b402007-11-02 19:02:07 +00002507int page_check_range(target_ulong start, target_ulong len, int flags)
2508{
2509 PageDesc *p;
2510 target_ulong end;
2511 target_ulong addr;
2512
Richard Henderson376a7902010-03-10 15:57:04 -08002513 /* This function should never be called with addresses outside the
2514 guest address space. If this assert fires, it probably indicates
2515 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002516#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2517 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002518#endif
2519
Richard Henderson3e0650a2010-03-29 10:54:42 -07002520 if (len == 0) {
2521 return 0;
2522 }
Richard Henderson376a7902010-03-10 15:57:04 -08002523 if (start + len - 1 < start) {
2524 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002525 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002526 }
balrog55f280c2008-10-28 10:24:11 +00002527
ths3d97b402007-11-02 19:02:07 +00002528 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2529 start = start & TARGET_PAGE_MASK;
2530
Richard Henderson376a7902010-03-10 15:57:04 -08002531 for (addr = start, len = end - start;
2532 len != 0;
2533 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002534 p = page_find(addr >> TARGET_PAGE_BITS);
2535 if( !p )
2536 return -1;
2537 if( !(p->flags & PAGE_VALID) )
2538 return -1;
2539
bellarddae32702007-11-14 10:51:00 +00002540 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002541 return -1;
bellarddae32702007-11-14 10:51:00 +00002542 if (flags & PAGE_WRITE) {
2543 if (!(p->flags & PAGE_WRITE_ORG))
2544 return -1;
2545 /* unprotect the page if it was put read-only because it
2546 contains translated code */
2547 if (!(p->flags & PAGE_WRITE)) {
2548 if (!page_unprotect(addr, 0, NULL))
2549 return -1;
2550 }
2551 return 0;
2552 }
ths3d97b402007-11-02 19:02:07 +00002553 }
2554 return 0;
2555}
2556
bellard9fa3e852004-01-04 18:06:42 +00002557/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002558 page. Return TRUE if the fault was successfully handled. */
pbrook53a59602006-03-25 19:31:22 +00002559int page_unprotect(target_ulong address, unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002560{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002561 unsigned int prot;
2562 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002563 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002564
pbrookc8a706f2008-06-02 16:16:42 +00002565 /* Technically this isn't safe inside a signal handler. However we
2566 know this only ever happens in a synchronous SEGV handler, so in
2567 practice it seems to be ok. */
2568 mmap_lock();
2569
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002570 p = page_find(address >> TARGET_PAGE_BITS);
2571 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002572 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002573 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002574 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002575
bellard9fa3e852004-01-04 18:06:42 +00002576 /* if the page was really writable, then we change its
2577 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002578 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2579 host_start = address & qemu_host_page_mask;
2580 host_end = host_start + qemu_host_page_size;
2581
2582 prot = 0;
2583 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2584 p = page_find(addr >> TARGET_PAGE_BITS);
2585 p->flags |= PAGE_WRITE;
2586 prot |= p->flags;
2587
bellard9fa3e852004-01-04 18:06:42 +00002588 /* and since the content will be modified, we must invalidate
2589 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002590 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002591#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002592 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002593#endif
bellard9fa3e852004-01-04 18:06:42 +00002594 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002595 mprotect((void *)g2h(host_start), qemu_host_page_size,
2596 prot & PAGE_BITS);
2597
2598 mmap_unlock();
2599 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002600 }
pbrookc8a706f2008-06-02 16:16:42 +00002601 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002602 return 0;
2603}
2604
bellard6a00d602005-11-21 23:25:50 +00002605static inline void tlb_set_dirty(CPUState *env,
2606 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002607{
2608}
bellard9fa3e852004-01-04 18:06:42 +00002609#endif /* defined(CONFIG_USER_ONLY) */
2610
pbrooke2eef172008-06-08 01:09:01 +00002611#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002612
/* Offset of an address within its target page.  */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* Describes a target page whose bytes are backed by more than one
   memory region: per-byte I/O handler index and region offset.  */
typedef struct subpage_t {
    target_phys_addr_t base;                     /* page base address */
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];   /* per-byte io index */
    ram_addr_t region_offset[TARGET_PAGE_SIZE];  /* per-byte offset */
} subpage_t;
2619
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
/* Compute the sub-page span [start_addr2, end_addr2] of the page at
   'addr' that is covered by a registration starting at 'start_addr',
   and set need_subpage if the registration does not cover the whole
   page.  NOTE: reads 'orig_size' from the caller's scope (the
   unrounded registration size in cpu_register_physical_memory_log).  */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2644
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    /* Keep the unrounded size: CHECK_SUBPAGE below reads 'orig_size'
       from this scope to detect partial-page coverage.  */
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    assert(size);
    /* Let physical-memory clients (e.g. KVM) see the new mapping.  */
    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* Page already mapped: may need to split into a subpage.  */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* Page is already a subpage: reuse it.  */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* RAM/ROM(+ROMD) pages advance phys_offset page by page;
                   plain I/O pages all share one phys_offset value.  */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* First mapping for this page: allocate the descriptor.  */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
2738
bellardba863452006-09-24 18:41:10 +00002739/* XXX: temporary until new memory mapping API */
Anthony Liguoric227f092009-10-01 16:12:16 -05002740ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
bellardba863452006-09-24 18:41:10 +00002741{
2742 PhysPageDesc *p;
2743
2744 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2745 if (!p)
2746 return IO_MEM_UNASSIGNED;
2747 return p->phys_offset;
2748}
2749
Anthony Liguoric227f092009-10-01 16:12:16 -05002750void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002751{
2752 if (kvm_enabled())
2753 kvm_coalesce_mmio_region(addr, size);
2754}
2755
Anthony Liguoric227f092009-10-01 16:12:16 -05002756void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002757{
2758 if (kvm_enabled())
2759 kvm_uncoalesce_mmio_region(addr, size);
2760}
2761
/* Flush any writes pending in KVM's coalesced-MMIO ring; a no-op when
   not running under KVM.  */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_flush_coalesced_mmio_buffer();
}
2767
Marcelo Tosattic9027602010-03-01 20:25:08 -03002768#if defined(__linux__) && !defined(TARGET_S390X)
2769
2770#include <sys/vfs.h>
2771
2772#define HUGETLBFS_MAGIC 0x958458f6
2773
/* Return the block size (i.e. the huge page size) of the filesystem
   backing 'path', or 0 on statfs failure.  Warns but still returns
   the block size when the path is not on hugetlbfs.  */
static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    /* Retry statfs if interrupted by a signal.  */
    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
2793
/* Allocate 'memory' bytes of guest RAM backed by a (deleted) temp file
   under hugetlbfs directory 'path'.  Returns the mmap'ed area, or NULL
   on any failure (caller falls back to normal allocation).  On success
   the backing fd is stored in block->fd; ownership of the mapping
   transfers to the caller.  */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* Not worth using huge pages for less than one huge page.  */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    /* Unlink immediately: the file persists only while fd is open, so
       it is cleaned up automatically when QEMU exits.  */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages.  */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
2862#endif
2863
Alex Williamsond17b5282010-06-25 11:08:38 -06002864static ram_addr_t find_ram_offset(ram_addr_t size)
2865{
Alex Williamson04b16652010-07-02 11:13:17 -06002866 RAMBlock *block, *next_block;
Blue Swirl09d7ae92010-07-07 19:37:53 +00002867 ram_addr_t offset = 0, mingap = ULONG_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002868
2869 if (QLIST_EMPTY(&ram_list.blocks))
2870 return 0;
2871
2872 QLIST_FOREACH(block, &ram_list.blocks, next) {
2873 ram_addr_t end, next = ULONG_MAX;
2874
2875 end = block->offset + block->length;
2876
2877 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2878 if (next_block->offset >= end) {
2879 next = MIN(next, next_block->offset);
2880 }
2881 }
2882 if (next - end >= size && next - end < mingap) {
2883 offset = end;
2884 mingap = next - end;
2885 }
2886 }
2887 return offset;
2888}
2889
2890static ram_addr_t last_ram_offset(void)
2891{
Alex Williamsond17b5282010-06-25 11:08:38 -06002892 RAMBlock *block;
2893 ram_addr_t last = 0;
2894
2895 QLIST_FOREACH(block, &ram_list.blocks, next)
2896 last = MAX(last, block->offset + block->length);
2897
2898 return last;
2899}
2900
/* Register a new RAM block of SIZE bytes (page-aligned up) under an id
   built from DEV's bus path and NAME.  If HOST is non-NULL the caller
   supplies the backing memory (RAM_PREALLOC_MASK is set and we never
   free/remap it); otherwise memory is allocated here, via -mem-path,
   the s390 KVM fixed mapping, Xen, or qemu_vmalloc.  Aborts on a
   duplicate id.  Returns the block's guest ram_addr_t offset. */
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    /* Prefix the id with the device's bus path so identically named
       regions on different devices stay distinct. */
    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* Duplicate ids would make blocks indistinguishable (e.g. for
       migration), so treat that as a fatal programming error. */
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                /* Fall back to anonymous memory if the file-backed
                   allocation failed. */
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB.  Larger systems
               have larger values.  We put the guest between the end of data
               segment (system break) and this value.  We use 32GB as a base to
               have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_mapcache_enabled()) {
                /* Xen owns the memory; host stays NULL and is mapped
                   lazily through the map cache. */
                xen_ram_alloc(new_block->offset, size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    /* Grow the dirty bitmap and mark the new range fully dirty. */
    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
2980
/* Allocate and register a RAM block of SIZE bytes under DEV/NAME,
   letting qemu_ram_alloc_from_ptr allocate the host memory itself
   (host == NULL).  Returns the block's guest ram_addr_t offset. */
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
bellarde9a1ab12007-02-08 23:08:38 +00002985
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002986void qemu_ram_free_from_ptr(ram_addr_t addr)
2987{
2988 RAMBlock *block;
2989
2990 QLIST_FOREACH(block, &ram_list.blocks, next) {
2991 if (addr == block->offset) {
2992 QLIST_REMOVE(block, next);
2993 qemu_free(block);
2994 return;
2995 }
2996 }
2997}
2998
/* Unregister the RAM block whose offset is ADDR and release its host
   memory with the mechanism that allocated it (file-backed mmap,
   s390 mmap, Xen map cache, or qemu_vmalloc).  Preallocated blocks
   (RAM_PREALLOC_MASK) are caller-owned and only unlinked.  Silently
   does nothing when no block matches. */
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                /* Host memory was supplied by the caller; not ours to free. */
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    /* file_ram_alloc mapped this from a file descriptor. */
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    /* -mem-path fell back to qemu_vmalloc at alloc time. */
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_mapcache_enabled()) {
                    qemu_invalidate_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            qemu_free(block);
            return;
        }
    }

}
3036
Huang Yingcd19cfa2011-03-02 08:56:19 +01003037#ifndef _WIN32
3038void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3039{
3040 RAMBlock *block;
3041 ram_addr_t offset;
3042 int flags;
3043 void *area, *vaddr;
3044
3045 QLIST_FOREACH(block, &ram_list.blocks, next) {
3046 offset = addr - block->offset;
3047 if (offset < block->length) {
3048 vaddr = block->host + offset;
3049 if (block->flags & RAM_PREALLOC_MASK) {
3050 ;
3051 } else {
3052 flags = MAP_FIXED;
3053 munmap(vaddr, length);
3054 if (mem_path) {
3055#if defined(__linux__) && !defined(TARGET_S390X)
3056 if (block->fd) {
3057#ifdef MAP_POPULATE
3058 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3059 MAP_PRIVATE;
3060#else
3061 flags |= MAP_PRIVATE;
3062#endif
3063 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3064 flags, block->fd, offset);
3065 } else {
3066 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3067 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3068 flags, -1, 0);
3069 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003070#else
3071 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01003072#endif
3073 } else {
3074#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3075 flags |= MAP_SHARED | MAP_ANONYMOUS;
3076 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3077 flags, -1, 0);
3078#else
3079 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3080 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3081 flags, -1, 0);
3082#endif
3083 }
3084 if (area != vaddr) {
3085 fprintf(stderr, "Could not remap addr: %lx@%lx\n",
3086 length, addr);
3087 exit(1);
3088 }
3089 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3090 }
3091 return;
3092 }
3093 }
3094}
3095#endif /* !_WIN32 */
3096
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.

   Aborts if ADDR falls in no registered block.  As a side effect the
   matching block is moved to the front of the list (MRU ordering,
   which speeds up repeated lookups of the same block). */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_mapcache_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return qemu_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    /* Lazily map the whole block through the Xen map cache. */
                    block->host = qemu_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
3136
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks, so it is
 * safe to call while another thread may be iterating the block list.
 * Aborts if ADDR falls in no registered block.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_mapcache_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return qemu_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    /* Lazily map the whole block through the Xen map cache. */
                    block->host = qemu_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
3166
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003167/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3168 * but takes a size argument */
3169void *qemu_ram_ptr_length(target_phys_addr_t addr, target_phys_addr_t *size)
3170{
3171 if (xen_mapcache_enabled())
3172 return qemu_map_cache(addr, *size, 1);
3173 else {
3174 RAMBlock *block;
3175
3176 QLIST_FOREACH(block, &ram_list.blocks, next) {
3177 if (addr - block->offset < block->length) {
3178 if (addr - block->offset + *size > block->length)
3179 *size = block->length - addr + block->offset;
3180 return block->host + (addr - block->offset);
3181 }
3182 }
3183
3184 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3185 abort();
3186
3187 *size = 0;
3188 return NULL;
3189 }
3190}
3191
/* Release a pointer previously obtained from qemu_get_ram_ptr and
   friends.  Currently this only emits a trace event; no unmapping or
   refcounting is performed here. */
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
3196
/* Translate a host pointer back to a guest ram_addr_t.  Returns 0 and
   stores the offset in *RAM_ADDR on success, -1 when PTR lies in no
   registered block.  Under Xen the map cache does the reverse lookup. */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_mapcache_enabled()) {
        *ram_addr = qemu_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped (block->host is
           set lazily for Xen-backed blocks, see qemu_get_ram_ptr). */
        if (block->host == NULL) {
            continue;
        }
        /* NOTE(review): if host is below block->host the pointer difference
           is negative and is compared against an unsigned length — relies
           on the implicit conversion rejecting it; confirm on all hosts. */
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}
Alex Williamsonf471a172010-06-11 11:11:42 -06003220
Marcelo Tosattie8902612010-10-11 15:31:19 -03003221/* Some of the softmmu routines need to translate from a host pointer
3222 (typically a TLB entry) back to a ram offset. */
3223ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3224{
3225 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003226
Marcelo Tosattie8902612010-10-11 15:31:19 -03003227 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3228 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3229 abort();
3230 }
3231 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003232}
3233
/* Byte read from unassigned physical memory: optionally log, raise a
   target-specific unassigned-access fault on some targets, return 0. */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}
3244
/* 16-bit read from unassigned physical memory; see unassigned_mem_readb. */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}
3255
/* 32-bit read from unassigned physical memory; see unassigned_mem_readb. */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}
3266
/* Byte write to unassigned physical memory: optionally log, raise a
   target-specific unassigned-access fault on some targets, discard. */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
3276
/* 16-bit write to unassigned physical memory; see unassigned_mem_writeb. */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
3286
/* 32-bit write to unassigned physical memory; see unassigned_mem_writeb. */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
3296
/* Read dispatch table, indexed by log2 of the access size (0/1/2). */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};
3302
/* Write dispatch table, indexed by log2 of the access size (0/1/2). */
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
3308
/* Byte write to a page that may hold translated code: invalidate any
   TBs on the page first, perform the store, then mark the page dirty.
   Once all dirty bits are set the TLB "notdirty" trap is removed. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        /* Invalidation may have changed the flags; re-read them. */
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3328
/* 16-bit variant of notdirty_mem_writeb. */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        /* Invalidation may have changed the flags; re-read them. */
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3348
/* 32-bit variant of notdirty_mem_writeb. */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        /* Invalidation may have changed the flags; re-read them. */
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3368
/* Placeholder read table for I/O slots that must never be read
   (e.g. paired with notdirty_mem_write below). */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
3374
/* Write dispatch table for code-containing ("not dirty") pages. */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
3380
/* Generate a debug exception if a watchpoint has been hit.
   OFFSET is the in-page offset of the access, LEN_MASK masks off the
   access width, FLAGS selects BP_MEM_READ/BP_MEM_WRITE.  On a hit the
   current TB is invalidated and execution restarts, either raising
   EXCP_DEBUG before the access (BP_STOP_BEFORE_ACCESS) or single-
   stepping one instruction so the access completes first. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                /* Never returns; restarts execution at the new TB. */
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
3425
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}
3434
/* 16-bit watched read; see watch_mem_readb. */
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}
3440
/* 32-bit watched read; see watch_mem_readb. */
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}
3446
/* Byte watched write; checks watchpoints then stores via stb_phys. */
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}
3453
/* 16-bit watched write; see watch_mem_writeb. */
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}
3460
/* 32-bit watched write; see watch_mem_writeb. */
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}
3467
/* Read dispatch table for watchpoint-trapped pages. */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
3473
/* Write dispatch table for watchpoint-trapped pages. */
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
pbrook6658ffb2007-03-16 23:58:11 +00003479
Richard Hendersonf6405242010-04-22 16:47:31 -07003480static inline uint32_t subpage_readlen (subpage_t *mmio,
3481 target_phys_addr_t addr,
3482 unsigned int len)
blueswir1db7b5422007-05-26 17:36:03 +00003483{
Richard Hendersonf6405242010-04-22 16:47:31 -07003484 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003485#if defined(DEBUG_SUBPAGE)
3486 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3487 mmio, len, addr, idx);
3488#endif
blueswir1db7b5422007-05-26 17:36:03 +00003489
Richard Hendersonf6405242010-04-22 16:47:31 -07003490 addr += mmio->region_offset[idx];
3491 idx = mmio->sub_io_index[idx];
3492 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
blueswir1db7b5422007-05-26 17:36:03 +00003493}
3494
Anthony Liguoric227f092009-10-01 16:12:16 -05003495static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
Richard Hendersonf6405242010-04-22 16:47:31 -07003496 uint32_t value, unsigned int len)
blueswir1db7b5422007-05-26 17:36:03 +00003497{
Richard Hendersonf6405242010-04-22 16:47:31 -07003498 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003499#if defined(DEBUG_SUBPAGE)
Richard Hendersonf6405242010-04-22 16:47:31 -07003500 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3501 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003502#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003503
3504 addr += mmio->region_offset[idx];
3505 idx = mmio->sub_io_index[idx];
3506 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00003507}
3508
/* Byte read through the sub-page dispatcher. */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}
3513
/* Byte write through the sub-page dispatcher. */
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}
3519
/* 16-bit read through the sub-page dispatcher. */
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}
3524
/* 16-bit write through the sub-page dispatcher. */
static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}
3530
/* 32-bit read through the sub-page dispatcher. */
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}
3535
/* 32-bit write through the sub-page dispatcher. */
static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}
3541
/* Read dispatch table for sub-page regions. */
static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};
3547
/* Write dispatch table for sub-page regions. */
static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
3553
/* Point the sub-page slots covering [START, END] (in-page offsets,
   inclusive) at the I/O handler index extracted from MEMORY, with
   REGION_OFFSET added to accesses.  Returns -1 if either bound is
   outside the page, 0 on success. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* Plain RAM cannot be dispatched through a sub-page handler. */
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
        memory = IO_MEM_UNASSIGNED;
    /* Reduce the encoded handler to its io_mem table index. */
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}
3577
/* Allocate and register a sub-page container for the page at BASE.
   Every slot is initially routed to ORIG_MEMORY/REGION_OFFSET; the
   caller then overrides sub-ranges via subpage_register.  *PHYS
   receives the new io-mem handle tagged with IO_MEM_SUBPAGE. */
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
3599
aliguori88715652009-02-11 15:20:58 +00003600static int get_free_io_mem_idx(void)
3601{
3602 int i;
3603
3604 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3605 if (!io_mem_used[i]) {
3606 io_mem_used[i] = 1;
3607 return i;
3608 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003609 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003610 return -1;
3611}
3612
/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 *
 * CPU          Device           swap?
 *
 * little       little           no
 * little       big              yes
 * big          little           yes
 * big          big              no
 */

/* State for the byte-swapping shim: remembers the device's real accessors
   (and its opaque pointer) while the io_mem tables point at the
   swapendian_* trampolines. */
typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];   /* original handlers: [0]=byte [1]=word [2]=long */
    CPUWriteMemoryFunc *write[3]; /* original handlers, same indexing */
    void *opaque;                 /* original device opaque */
} SwapEndianContainer;
3631
3632static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3633{
3634 uint32_t val;
3635 SwapEndianContainer *c = opaque;
3636 val = c->read[0](c->opaque, addr);
3637 return val;
3638}
3639
3640static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3641{
3642 uint32_t val;
3643 SwapEndianContainer *c = opaque;
3644 val = bswap16(c->read[1](c->opaque, addr));
3645 return val;
3646}
3647
3648static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3649{
3650 uint32_t val;
3651 SwapEndianContainer *c = opaque;
3652 val = bswap32(c->read[2](c->opaque, addr));
3653 return val;
3654}
3655
/* Read trampolines by access size ([0]=byte, [1]=word, [2]=long);
   installed into io_mem_read[] by swapendian_init(). */
static CPUReadMemoryFunc * const swapendian_readfn[3]={
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};
3661
3662static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3663 uint32_t val)
3664{
3665 SwapEndianContainer *c = opaque;
3666 c->write[0](c->opaque, addr, val);
3667}
3668
3669static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3670 uint32_t val)
3671{
3672 SwapEndianContainer *c = opaque;
3673 c->write[1](c->opaque, addr, bswap16(val));
3674}
3675
3676static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3677 uint32_t val)
3678{
3679 SwapEndianContainer *c = opaque;
3680 c->write[2](c->opaque, addr, bswap32(val));
3681}
3682
/* Write trampolines by access size ([0]=byte, [1]=word, [2]=long);
   installed into io_mem_write[] by swapendian_init(). */
static CPUWriteMemoryFunc * const swapendian_writefn[3]={
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};
3688
3689static void swapendian_init(int io_index)
3690{
3691 SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
3692 int i;
3693
3694 /* Swap mmio for big endian targets */
3695 c->opaque = io_mem_opaque[io_index];
3696 for (i = 0; i < 3; i++) {
3697 c->read[i] = io_mem_read[io_index][i];
3698 c->write[i] = io_mem_write[io_index][i];
3699
3700 io_mem_read[io_index][i] = swapendian_readfn[i];
3701 io_mem_write[io_index][i] = swapendian_writefn[i];
3702 }
3703 io_mem_opaque[io_index] = c;
3704}
3705
3706static void swapendian_del(int io_index)
3707{
3708 if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
3709 qemu_free(io_mem_opaque[io_index]);
3710 }
3711}
3712
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        /* Allocate a fresh slot. */
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        /* Caller passed a full IO_MEM token; strip it down to the index. */
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    /* NULL entries fall back to the unassigned-memory handlers. */
    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    /* Interpose the byte-swapping shim only when device and target
       endianness differ (see the table above SwapEndianContainer). */
    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    /* Return a full IO_MEM token, not a raw index. */
    return (io_index << IO_MEM_SHIFT);
}
bellard61382a52003-10-27 21:22:23 +00003765
Blue Swirld60efc62009-08-25 18:29:31 +00003766int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3767 CPUWriteMemoryFunc * const *mem_write,
Alexander Grafdd310532010-12-08 12:05:36 +01003768 void *opaque, enum device_endian endian)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003769{
Alexander Graf2507c122010-12-08 12:05:37 +01003770 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003771}
3772
aliguori88715652009-02-11 15:20:58 +00003773void cpu_unregister_io_memory(int io_table_address)
3774{
3775 int i;
3776 int io_index = io_table_address >> IO_MEM_SHIFT;
3777
Alexander Grafdd310532010-12-08 12:05:36 +01003778 swapendian_del(io_index);
3779
aliguori88715652009-02-11 15:20:58 +00003780 for (i=0;i < 3; i++) {
3781 io_mem_read[io_index][i] = unassigned_mem_read[i];
3782 io_mem_write[io_index][i] = unassigned_mem_write[i];
3783 }
3784 io_mem_opaque[io_index] = NULL;
3785 io_mem_used[io_index] = 0;
3786}
3787
/* One-time setup of the io_mem tables: install the fixed entries (ROM,
   unassigned, not-dirty) at their reserved indices, then register the
   watchpoint handler in a dynamically allocated slot. */
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    /* NOTE(review): the magic 5 presumably reserves the fixed IO_MEM_*
       slots (including ones registered elsewhere) — confirm against the
       IO_MEM_* definitions before changing. */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}
3808
pbrooke2eef172008-06-08 01:09:01 +00003809#endif /* !defined(CONFIG_USER_ONLY) */
3810
bellard13eb76e2004-01-24 15:23:36 +00003811/* physical memory access (slow version, mainly for debug) */
3812#if defined(CONFIG_USER_ONLY)
/* User-mode-only debugger access: copy 'len' bytes between guest virtual
 * memory and 'buf', page by page, validating the page flags first.
 * Returns 0 on success, -1 on an invalid page, a protection mismatch or a
 * failed lock_user().
 */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            /* Third argument 'l': the guest copy was modified and must be
               written back on unlock. */
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            /* Third argument 0: nothing was modified, no write-back. */
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
bellard8df1cd02005-01-28 22:37:22 +00003851
bellard13eb76e2004-01-24 15:23:36 +00003852#else
/* Copy 'len' bytes between the guest physical address range at 'addr' and
 * 'buf' (direction chosen by 'is_write').  RAM pages are memcpy'd, with
 * TB invalidation and dirty-bit update on writes; I/O pages are split
 * into the widest naturally aligned 4/2/1-byte device accesses.
 */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* Device (MMIO) write. */
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
bellard8df1cd02005-01-28 22:37:22 +00003950
/* used for ROM loading : can write in RAM and ROM.  Unlike
   cpu_physical_memory_rw() this bypasses dirty tracking and silently
   skips pages that are neither RAM, ROM nor ROMD-mapped device memory. */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3990
/* Single static bounce buffer used by cpu_physical_memory_map() when the
   requested range is not directly addressable RAM.  Only one mapping can
   use it at a time (callers poll bounce.buffer). */
typedef struct {
    void *buffer;              /* host-side staging memory, or NULL when free */
    target_phys_addr_t addr;   /* guest physical address being shadowed */
    target_phys_addr_t len;    /* length of the shadowed region */
} BounceBuffer;

static BounceBuffer bounce;
3998
/* Registration record for clients waiting to retry a failed
   cpu_physical_memory_map(); notified when the bounce buffer is freed. */
typedef struct MapClient {
    void *opaque;                   /* handed back to the callback */
    void (*callback)(void *opaque); /* invoked by cpu_notify_map_clients() */
    QLIST_ENTRY(MapClient) link;
} MapClient;

/* Head of the list of waiting map clients. */
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00004007
4008void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
4009{
4010 MapClient *client = qemu_malloc(sizeof(*client));
4011
4012 client->opaque = opaque;
4013 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00004014 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00004015 return client;
4016}
4017
4018void cpu_unregister_map_client(void *_client)
4019{
4020 MapClient *client = (MapClient *)_client;
4021
Blue Swirl72cf2d42009-09-12 07:36:22 +00004022 QLIST_REMOVE(client, link);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09004023 qemu_free(client);
aliguoriba223c22009-01-22 16:59:16 +00004024}
4025
4026static void cpu_notify_map_clients(void)
4027{
4028 MapClient *client;
4029
Blue Swirl72cf2d42009-09-12 07:36:22 +00004030 while (!QLIST_EMPTY(&map_client_list)) {
4031 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00004032 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09004033 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00004034 }
4035}
4036
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;   /* bytes of directly mappable RAM so far */
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    target_phys_addr_t addr1 = addr;  /* start of the direct-mapped run */

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Non-RAM page: if we already collected direct RAM, or the
               single bounce buffer is busy, stop and return what we have. */
            if (todo || bounce.buffer) {
                break;
            }
            /* Otherwise serve (at most one page) through the bounce buffer;
               pre-fill it for read mappings. */
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }

        len -= l;
        addr += l;
        todo += l;
    }
    /* Direct case: hand back a host pointer into guest RAM; *plen may be
       shrunk further by qemu_ram_ptr_length(). */
    *plen = todo;
    return qemu_ram_ptr_length(addr1, plen);
}
4090
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct-mapped RAM: update dirty tracking page by page. */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_mapcache_enabled()) {
            /* Xen: drop the mapcache entry backing this host pointer. */
            qemu_invalidate_entry(buffer);
        }
        return;
    }
    /* Bounce-buffer case: flush writes back to the guest, release the
       buffer and wake anyone waiting to retry a mapping. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
bellardd0ecd2a2006-04-23 17:14:48 +00004129
/* warning: addr must be aligned.
   Load a 32-bit value from guest physical memory: device regions go
   through the slot's 32-bit io_mem read handler, RAM/ROMD is read
   directly via ldl_p (target byte order). */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
4161
/* warning: addr must be aligned.
   Load a 64-bit value from guest physical memory.  Device regions are
   synthesized from two 32-bit io_mem reads, with high/low order chosen
   by target endianness; RAM/ROMD is read directly via ldq_p. */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: two 32-bit accesses, big end first on BE targets. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
4199
bellardaab33092005-10-30 20:48:42 +00004200/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004201uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00004202{
4203 uint8_t val;
4204 cpu_physical_memory_read(addr, &val, 1);
4205 return val;
4206}
4207
/* warning: addr must be aligned.
   Load a 16-bit value from guest physical memory: device regions use the
   slot's 16-bit io_mem read handler, RAM/ROMD is read via lduw_p. */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = lduw_p(ptr);
    }
    return val;
}
4239
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* Device write via the 32-bit io_mem handler. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* During migration, writes must still be tracked so the page is
           re-sent; code invalidation is performed but CODE_DIRTY_FLAG is
           left clear, matching this function's contract. */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
4278
/* 64-bit variant of stl_phys_notdirty(): store without marking the page
   dirty or invalidating translated code.  warning: addr must be aligned.
   Device regions are written as two 32-bit accesses, high word first on
   big-endian targets. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM: direct store in target byte order, no dirty tracking. */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
4310
/* warning: addr must be aligned.
   Store a 32-bit value to guest physical memory.  RAM writes invalidate
   any translated code on the page and set the dirty flags; device
   regions go through the slot's 32-bit io_mem write handler. */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
4346
bellardaab33092005-10-30 20:48:42 +00004347/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004348void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004349{
4350 uint8_t v = val;
4351 cpu_physical_memory_write(addr, &v, 1);
4352}
4353
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004354/* warning: addr must be aligned */
Anthony Liguoric227f092009-10-01 16:12:16 -05004355void stw_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004356{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004357 int io_index;
4358 uint8_t *ptr;
4359 unsigned long pd;
4360 PhysPageDesc *p;
4361
4362 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4363 if (!p) {
4364 pd = IO_MEM_UNASSIGNED;
4365 } else {
4366 pd = p->phys_offset;
4367 }
4368
4369 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4370 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4371 if (p)
4372 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4373 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4374 } else {
4375 unsigned long addr1;
4376 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4377 /* RAM case */
4378 ptr = qemu_get_ram_ptr(addr1);
4379 stw_p(ptr, val);
4380 if (!cpu_physical_memory_is_dirty(addr1)) {
4381 /* invalidate code */
4382 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4383 /* set dirty bit */
4384 cpu_physical_memory_set_dirty_flags(addr1,
4385 (0xff & ~CODE_DIRTY_FLAG));
4386 }
4387 }
bellardaab33092005-10-30 20:48:42 +00004388}
4389
4390/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004391void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004392{
4393 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004394 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004395}
4396
aliguori5e2972f2009-03-28 17:51:36 +00004397/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00004398int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004399 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004400{
4401 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004402 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004403 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004404
4405 while (len > 0) {
4406 page = addr & TARGET_PAGE_MASK;
4407 phys_addr = cpu_get_phys_page_debug(env, page);
4408 /* if no physical page mapped, return an error */
4409 if (phys_addr == -1)
4410 return -1;
4411 l = (page + TARGET_PAGE_SIZE) - addr;
4412 if (l > len)
4413 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004414 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004415 if (is_write)
4416 cpu_physical_memory_write_rom(phys_addr, buf, l);
4417 else
aliguori5e2972f2009-03-28 17:51:36 +00004418 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004419 len -= l;
4420 buf += l;
4421 addr += l;
4422 }
4423 return 0;
4424}
Paul Brooka68fe892010-03-01 00:08:59 +00004425#endif
bellard13eb76e2004-01-24 15:23:36 +00004426
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* Map the host return address back to the TB that was executing. */
    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    /* Remaining icount budget plus this TB's instruction count; the
       difference after cpu_restore_state gives the executed count. */
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        /* back up over the branch and account for the extra insn */
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        /* back up over the branch and account for the extra insn */
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* Retranslate the same guest code with an insn limit of n and
       CF_LAST_IO set, so the I/O insn terminates the new TB. */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
4485
Paul Brookb3755a92010-03-12 16:54:58 +00004486#if !defined(CONFIG_USER_ONLY)
4487
/* Print translation-cache statistics (generated code size, TB counts
   and sizes, cross-page and direct-jump rates, flush counters) to @f
   via @cpu_fprintf, then append TCG's own statistics. */
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    /* Single pass over all live TBs to gather the aggregates. */
    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        /* a second page address means the TB spans a page boundary */
        if (tb->page_addr[1] != -1)
            cross_page++;
        /* 0xffff marks "no direct jump patched" for that slot */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    /* delegate code-generator statistics to TCG itself */
    tcg_dump_info(f, cpu_fprintf);
}
4539
/* Instantiate the "_cmmu" family of softmmu accessors from
   softmmu_template.h, once per access width (SHIFT 0..3 = 1/2/4/8
   bytes).  SOFTMMU_CODE_ACCESS selects the code-access variants
   (presumably used by the translator to fetch guest code — see
   softmmu_template.h); GETPC() is NULL here, and `env` is mapped to
   the global cpu_single_env for the duration of the includes. */
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* stop aliasing `env`; later code must use an explicit CPU state */
#undef env
4558
4559#endif