/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
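
/* A worked example of the macro arithmetic above (illustrative only,
   assuming TARGET_PAGE_BITS = 12, i.e. 4 KiB pages, and a 32-bit user-mode
   address space, so L1_MAP_ADDR_SPACE_BITS = 32):

       V_L1_BITS_REM = (32 - 12) % 10 = 0   ->  padded to V_L1_BITS = 10
       V_L1_SIZE     = 1 << 10              ->  1024 L1 entries
       V_L1_SHIFT    = 32 - 12 - 10 = 10

   so page_find_alloc() below needs no intermediate levels: the L1 entry
   selected by the top 10 bits of the page index points directly at a
   PageDesc[L2_SIZE] array indexed by the low 10 bits.  Larger address
   spaces simply add more 10-bit levels between the two.  */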

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
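
/* A usage sketch (illustrative): callers look up the descriptor for a
   guest page by its page index, optionally allocating the path to it:

       PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

   Each loop iteration above consumes L2_BITS of the index, so the tree
   depth is fixed at compile time by V_L1_SHIFT / L2_BITS, and only the
   paths actually touched are ever allocated.  */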

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;
        int first_index = index & ~(L2_SIZE - 1);

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change when a dedicated libc is used.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
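
/* Note on the mmap() hints above: they all serve the same purpose, keeping
   the code buffer within direct-branch range of code_gen_prologue and of
   the generated code itself, so TCG can emit direct calls and jumps rather
   than loading full addresses.  The usable size is also capped below the
   mapped size by one worst-case op buffer (TCG_MAX_OP_SIZE * OPC_BUF_SIZE)
   so that a translation in progress cannot run off the end.  */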

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
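
/* A sketch of the caller's contract (illustrative): tb_alloc() returns NULL
   when either the TB array or the code buffer is exhausted, and the caller
   is expected to flush and retry, exactly as tb_gen_code() does:

       tb = tb_alloc(pc);
       if (!tb) {
           tb_flush(env);
           tb = tb_alloc(pc);   // cannot fail right after a flush
       }
*/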

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
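
/* Note: flushing is deliberately all-or-nothing.  Individual TBs are never
   reclaimed (except for the tb_free() special case above); resetting
   nb_tbs, both hash tables and every CPU's tb_jmp_cache is simpler and is
   amortized over the many translations that fit in the buffer between
   flushes.  */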

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
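
/* The lists below pack a 2-bit tag into the low bits of each
   TranslationBlock pointer (the TB array elements are more than 4-byte
   aligned, so those bits are free).  Tag 0 or 1 names which of the TB's
   two page_next[]/jmp_next[] slots carries the link; tag 2 marks the end
   of the circular jump list.  Hence the recurring idiom:

       n1  = (long)tb1 & 3;                          // extract the tag
       tb1 = (TranslationBlock *)((long)tb1 & ~3);   // strip it
*/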

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's tb_jmp_cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
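
/* To summarize tb_phys_invalidate(): a dead TB is unlinked from four
   places: the physical-PC hash table, the per-page TB lists, every CPU's
   tb_jmp_cache, and the jump chains (its own outgoing jumps plus any
   incoming jumps, which tb_reset_jump() re-points at the TB's own
   epilogue).  The translated host code itself is not reclaimed; that
   space is only recovered by the next tb_flush().  */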

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
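
/* Worked example (illustrative): set_bits(tab, 3, 7) marks bits 3..9 and
   takes the multi-byte branch, OR-ing 0xf8 into tab[0] (bits 3-7) and 0x03
   into tab[1] (bits 8-9).  set_bits(tab, 1, 3) stays within one byte and
   takes the first branch, OR-ing 0x0e (bits 1-3) into tab[0].  */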

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
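
/* Note: tb_gen_code() is the one place where guest code becomes host code.
   The second get_page_addr_code() call covers TBs whose last instruction
   crosses a guest page boundary; such TBs are registered on both pages by
   tb_link_page() so that a write to either page invalidates them.  */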

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
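
/* Note on the TARGET_HAS_PRECISE_SMC path above: when a guest write lands
   inside the TB that is currently executing (found via env->mem_io_pc),
   the TB cannot simply be discarded mid-flight.  The CPU state is first
   rolled back to the faulting instruction with cpu_restore_state(); then a
   replacement TB constrained to a single instruction (cflags = 1) is
   generated and execution resumes via cpu_resume_from_signal().  */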

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1314
bellarda513fe12003-05-27 23:29:48 +00001315/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1316 tb[1].tc_ptr. Return NULL if not found */
1317TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1318{
1319 int m_min, m_max, m;
1320 unsigned long v;
1321 TranslationBlock *tb;
1322
1323 if (nb_tbs <= 0)
1324 return NULL;
1325 if (tc_ptr < (unsigned long)code_gen_buffer ||
1326 tc_ptr >= (unsigned long)code_gen_ptr)
1327 return NULL;
1328 /* binary search (cf Knuth) */
1329 m_min = 0;
1330 m_max = nb_tbs - 1;
1331 while (m_min <= m_max) {
1332 m = (m_min + m_max) >> 1;
1333 tb = &tbs[m];
1334 v = (unsigned long)tb->tc_ptr;
1335 if (v == tc_ptr)
1336 return tb;
1337 else if (tc_ptr < v) {
1338 m_max = m - 1;
1339 } else {
1340 m_min = m + 1;
1341 }
ths5fafdf22007-09-16 21:08:06 +00001342 }
bellarda513fe12003-05-27 23:29:48 +00001343 return &tbs[m_max];
1344}
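
/* Illustrative sketch (not part of the original file): the search above is a
   plain lower-bound binary search over tbs[], which is sorted by tc_ptr.
   The same pattern on an ordinary sorted array, for reference; the helper
   name is hypothetical. */
static inline int example_find_le(const unsigned long *sorted, int n,
                                  unsigned long key)
{
    int lo = 0, hi = n - 1, mid;

    if (n <= 0 || key < sorted[0])
        return -1;              /* no element <= key */
    while (lo <= hi) {
        mid = (lo + hi) >> 1;
        if (sorted[mid] == key)
            return mid;         /* exact hit */
        else if (key < sorted[mid])
            hi = mid - 1;
        else
            lo = mid + 1;
    }
    return hi;                  /* index of the greatest element < key */
}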

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
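
/* Illustrative sketch (not part of the original file): jmp_first/jmp_next
   above pack a 2-bit tag into the low bits of a TranslationBlock pointer
   (n = 0 or 1 selects the jump slot, n = 2 marks the list head), which is
   safe because TBs are aligned to more than 4 bytes. The encode/decode
   steps, spelled out with hypothetical helper names: */
static inline TranslationBlock *example_tb_tag_ptr(TranslationBlock *tb,
                                                   unsigned int n)
{
    return (TranslationBlock *)((long)tb | n);      /* pack tag n (0..2) */
}

static inline unsigned int example_tb_ptr_tag(TranslationBlock *tagged)
{
    return (long)tagged & 3;                        /* recover the tag */
}

static inline TranslationBlock *example_tb_ptr_base(TranslationBlock *tagged)
{
    return (TranslationBlock *)((long)tagged & ~3); /* recover the pointer */
}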

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
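
/* Illustrative sketch (not part of the original file): how a caller such as
   a gdbstub might set a 4-byte write watchpoint with the function above.
   The helper name is hypothetical; BP_GDB marks it as GDB-injected so it is
   kept at the front of the list. */
static inline int example_set_write_watchpoint(CPUState *env,
                                               target_ulong guest_addr)
{
    CPUWatchpoint *wp;

    /* len must be 1, 2, 4 or 8 and guest_addr must be len-aligned,
       otherwise cpu_watchpoint_insert() fails with -EINVAL. */
    return cpu_watchpoint_insert(env, guest_addr, 4,
                                 BP_MEM_WRITE | BP_GDB, &wp);
}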

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
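
/* Illustrative sketch (not part of the original file): a one-shot use of the
   breakpoint API above -- insert a GDB-style breakpoint, then drop it again
   by reference. The helper name is hypothetical. */
static inline int example_pulse_breakpoint(CPUState *env, target_ulong pc)
{
    CPUBreakpoint *bp;
    int ret = cpu_breakpoint_insert(env, pc, BP_GDB, &bp);

    if (ret == 0) {
        /* ... run/debug ... then remove via the reference we got back */
        cpu_breakpoint_remove_by_ref(env, bp);
    }
    return ret;   /* -ENOSYS when TARGET_HAS_ICE is not defined */
}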

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all I/O port accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for (item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for (item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
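
/* Illustrative sketch (not part of the original file): how a "-d" style
   command line option maps onto the two helpers above. Parse the
   comma-separated names into a mask, then apply it; a zero mask means an
   unknown item name. The helper name is hypothetical. */
static inline int example_apply_log_option(const char *opt)
{
    int mask = cpu_str_to_log_mask(opt);   /* e.g. opt = "in_asm,op" */

    if (!mask) {
        return -1;       /* at least one name was not recognized */
    }
    cpu_set_log(mask);   /* opens the log file on first use */
    return 0;
}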

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    if (enable) {
        memory_global_dirty_log_start();
    } else {
        memory_global_dirty_log_stop();
    }
    return ret;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
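
/* Illustrative sketch (not part of the original file): the widening loop
   above, isolated as a pure function on hypothetical inputs. Worked example
   with 4 KB target pages: a 1 MB region at 0x00100000 plus a 1 MB page at
   0x00280000 widens to mask 0xffc00000, i.e. a 4 MB window at address 0. */
static inline target_ulong example_widen_flush_mask(target_ulong cur_addr,
                                                    target_ulong cur_mask,
                                                    target_ulong vaddr,
                                                    target_ulong page_mask)
{
    target_ulong mask = cur_mask & page_mask;

    /* shift the mask left until one aligned window covers both addresses */
    while (((cur_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    return mask;
}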

/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
2197
bellard01243112004-01-04 15:48:17 +00002198#else
2199
bellardee8b7022004-02-03 23:35:10 +00002200void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002201{
2202}
2203
bellard2e126692004-04-25 21:28:44 +00002204void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002205{
2206}
2207
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002208/*
2209 * Walks guest process memory "regions" one by one
2210 * and calls callback function 'fn' for each region.
2211 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002212
2213struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002214{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002215 walk_memory_regions_fn fn;
2216 void *priv;
2217 unsigned long start;
2218 int prot;
2219};
bellard9fa3e852004-01-04 18:06:42 +00002220
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002221static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002222 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002223{
2224 if (data->start != -1ul) {
2225 int rc = data->fn(data->priv, data->start, end, data->prot);
2226 if (rc != 0) {
2227 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002228 }
bellard33417e72003-08-10 21:47:01 +00002229 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002230
2231 data->start = (new_prot ? end : -1ul);
2232 data->prot = new_prot;
2233
2234 return 0;
2235}
2236
2237static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002238 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002239{
Paul Brookb480d9b2010-03-12 23:23:29 +00002240 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002241 int i, rc;
2242
2243 if (*lp == NULL) {
2244 return walk_memory_regions_end(data, base, 0);
2245 }
2246
2247 if (level == 0) {
2248 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002249 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002250 int prot = pd[i].flags;
2251
2252 pa = base | (i << TARGET_PAGE_BITS);
2253 if (prot != data->prot) {
2254 rc = walk_memory_regions_end(data, pa, prot);
2255 if (rc != 0) {
2256 return rc;
2257 }
2258 }
2259 }
2260 } else {
2261 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002262 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002263 pa = base | ((abi_ulong)i <<
2264 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002265 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2266 if (rc != 0) {
2267 return rc;
2268 }
2269 }
2270 }
2271
2272 return 0;
2273}
2274
2275int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2276{
2277 struct walk_memory_regions_data data;
2278 unsigned long i;
2279
2280 data.fn = fn;
2281 data.priv = priv;
2282 data.start = -1ul;
2283 data.prot = 0;
2284
2285 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002286 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002287 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2288 if (rc != 0) {
2289 return rc;
2290 }
2291 }
2292
2293 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002294}

static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
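
/* Illustrative sketch (not part of the original file): how user-mode
   emulation code might validate a guest buffer with page_check_range()
   before reading from it. The helper name and the -EFAULT mapping are
   hypothetical. */
static inline int example_validate_guest_read(target_ulong guest_addr,
                                              target_ulong size)
{
    /* page_check_range() returns 0 on success and -1 on any failure */
    if (page_check_range(guest_addr, size, PAGE_READ) != 0) {
        return -EFAULT;
    }
    return 0;
}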

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

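/* Worked example for CHECK_SUBPAGE above (illustrative, assuming 4 KB
   target pages): registering [0x1100, 0x1300) while processing the page at
   addr = 0x1000 gives start_addr2 = 0x100 and end_addr2 = 0x2ff, and
   need_subpage is set because neither edge lies on a page boundary; a
   range covering the whole page would instead leave need_subpage at 0. */
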
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002505/* register physical memory.
2506 For RAM, 'size' must be a multiple of the target page size.
2507 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002508 io memory page. The address used when calling the IO function is
2509 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002510 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002511 before calculating this offset. This should not be a problem unless
2512 the low bits of start_addr and region_offset differ. */
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03002513void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
Anthony Liguoric227f092009-10-01 16:12:16 -05002514 ram_addr_t size,
2515 ram_addr_t phys_offset,
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03002516 ram_addr_t region_offset,
2517 bool log_dirty)
bellard33417e72003-08-10 21:47:01 +00002518{
Anthony Liguoric227f092009-10-01 16:12:16 -05002519 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002520 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002521 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002522 ram_addr_t orig_size = size;
Richard Hendersonf6405242010-04-22 16:47:31 -07002523 subpage_t *subpage;
bellard33417e72003-08-10 21:47:01 +00002524
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002525 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002526
pbrook67c4d232009-02-23 13:16:07 +00002527 if (phys_offset == IO_MEM_UNASSIGNED) {
2528 region_offset = start_addr;
2529 }
pbrook8da3ff12008-12-01 18:59:50 +00002530 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002531 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002532 end_addr = start_addr + (target_phys_addr_t)size;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002533
2534 addr = start_addr;
2535 do {
blueswir1db7b5422007-05-26 17:36:03 +00002536 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2537 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002538 ram_addr_t orig_memory = p->phys_offset;
2539 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002540 int need_subpage = 0;
2541
2542 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2543 need_subpage);
Richard Hendersonf6405242010-04-22 16:47:31 -07002544 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002545 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2546 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002547 &p->phys_offset, orig_memory,
2548 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002549 } else {
2550 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2551 >> IO_MEM_SHIFT];
2552 }
pbrook8da3ff12008-12-01 18:59:50 +00002553 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2554 region_offset);
2555 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002556 } else {
2557 p->phys_offset = phys_offset;
2558 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2559 (phys_offset & IO_MEM_ROMD))
2560 phys_offset += TARGET_PAGE_SIZE;
2561 }
2562 } else {
2563 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2564 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002565 p->region_offset = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00002566 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
pbrook8da3ff12008-12-01 18:59:50 +00002567 (phys_offset & IO_MEM_ROMD)) {
blueswir1db7b5422007-05-26 17:36:03 +00002568 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002569 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002570 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002571 int need_subpage = 0;
2572
2573 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2574 end_addr2, need_subpage);
2575
Richard Hendersonf6405242010-04-22 16:47:31 -07002576 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002577 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002578 &p->phys_offset, IO_MEM_UNASSIGNED,
pbrook67c4d232009-02-23 13:16:07 +00002579 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002580 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002581 phys_offset, region_offset);
2582 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002583 }
2584 }
2585 }
pbrook8da3ff12008-12-01 18:59:50 +00002586 region_offset += TARGET_PAGE_SIZE;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002587 addr += TARGET_PAGE_SIZE;
2588 } while (addr != end_addr);
ths3b46e622007-09-17 08:09:54 +00002589
bellard9d420372006-06-25 22:25:22 +00002590 /* since each CPU stores ram addresses in its TLB cache, we must
2591 reset the modified entries */
2592 /* XXX: slow ! */
2593 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2594 tlb_flush(env, 1);
2595 }
bellard33417e72003-08-10 21:47:01 +00002596}
2597
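/* Illustrative sketch (names and addresses hypothetical): mapping the
 * second 4 KiB page of a device register bank, with region_offset
 * chosen so the callbacks receive bank-relative offsets
 * (0x1000..0x1fff) rather than page-relative ones. "demo_iomemtype"
 * stands for a value returned by cpu_register_io_memory(). */
#if 0
static void demo_map_second_page(int demo_iomemtype)
{
    cpu_register_physical_memory_log(0x80001000, TARGET_PAGE_SIZE,
                                     demo_iomemtype, 0x1000, false);
}
#endif
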
Anthony Liguoric227f092009-10-01 16:12:16 -05002598void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002599{
2600 if (kvm_enabled())
2601 kvm_coalesce_mmio_region(addr, size);
2602}
2603
Anthony Liguoric227f092009-10-01 16:12:16 -05002604void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002605{
2606 if (kvm_enabled())
2607 kvm_uncoalesce_mmio_region(addr, size);
2608}
2609
Sheng Yang62a27442010-01-26 19:21:16 +08002610void qemu_flush_coalesced_mmio_buffer(void)
2611{
2612 if (kvm_enabled())
2613 kvm_flush_coalesced_mmio_buffer();
2614}
2615
Marcelo Tosattic9027602010-03-01 20:25:08 -03002616#if defined(__linux__) && !defined(TARGET_S390X)
2617
2618#include <sys/vfs.h>
2619
2620#define HUGETLBFS_MAGIC 0x958458f6
2621
2622static long gethugepagesize(const char *path)
2623{
2624 struct statfs fs;
2625 int ret;
2626
2627 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002628 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002629 } while (ret != 0 && errno == EINTR);
2630
2631 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002632 perror(path);
2633 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002634 }
2635
2636 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002637 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002638
2639 return fs.f_bsize;
2640}
2641
Alex Williamson04b16652010-07-02 11:13:17 -06002642static void *file_ram_alloc(RAMBlock *block,
2643 ram_addr_t memory,
2644 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002645{
2646 char *filename;
2647 void *area;
2648 int fd;
2649#ifdef MAP_POPULATE
2650 int flags;
2651#endif
2652 unsigned long hpagesize;
2653
2654 hpagesize = gethugepagesize(path);
2655 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002656 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002657 }
2658
2659 if (memory < hpagesize) {
2660 return NULL;
2661 }
2662
2663 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2664 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2665 return NULL;
2666 }
2667
2668 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002669 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002670 }
2671
2672 fd = mkstemp(filename);
2673 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002674 perror("unable to create backing store for hugepages");
2675 free(filename);
2676 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002677 }
2678 unlink(filename);
2679 free(filename);
2680
2681 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2682
2683 /*
2684 * ftruncate is not supported by hugetlbfs in older
2685 * hosts, so don't bother bailing out on errors.
2686 * If anything goes wrong with it under other filesystems,
2687 * mmap will fail.
2688 */
2689 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002690 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002691
2692#ifdef MAP_POPULATE
2693 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2694 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2695 * to sidestep this quirk.
2696 */
2697 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2698 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2699#else
2700 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2701#endif
2702 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002703 perror("file_ram_alloc: can't mmap RAM pages");
2704 close(fd);
2705 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002706 }
Alex Williamson04b16652010-07-02 11:13:17 -06002707 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002708 return area;
2709}
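
/* Sketch (hypothetical path and sizes): with "-mem-path /dev/hugepages"
 * on a typical x86 host, gethugepagesize() reports 2 MiB and
 * file_ram_alloc() rounds the request up to that granularity before
 * mmap()ing it from a hugetlbfs-backed temporary file. */
#if 0
static void *demo_hugepage_backing(RAMBlock *block)
{
    /* 128 MiB of guest RAM backed by huge pages; NULL on failure. */
    return file_ram_alloc(block, 128 * 1024 * 1024, "/dev/hugepages");
}
#endif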
2710#endif
2711
Alex Williamsond17b5282010-06-25 11:08:38 -06002712static ram_addr_t find_ram_offset(ram_addr_t size)
2713{
Alex Williamson04b16652010-07-02 11:13:17 -06002714 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002715 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002716
2717 if (QLIST_EMPTY(&ram_list.blocks))
2718 return 0;
2719
2720 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002721 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002722
2723 end = block->offset + block->length;
2724
2725 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2726 if (next_block->offset >= end) {
2727 next = MIN(next, next_block->offset);
2728 }
2729 }
2730 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002731 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002732 mingap = next - end;
2733 }
2734 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002735
2736 if (offset == RAM_ADDR_MAX) {
2737 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2738 (uint64_t)size);
2739 abort();
2740 }
2741
Alex Williamson04b16652010-07-02 11:13:17 -06002742 return offset;
2743}
2744
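/* Example (illustrative): with existing blocks [0, 0x1000) and
   [0x3000, 0x4000), find_ram_offset(0x1000) returns 0x1000 (the
   smallest gap that fits), while find_ram_offset(0x3000) skips the
   0x2000-byte gap and returns 0x4000, past the last block. */
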
2745static ram_addr_t last_ram_offset(void)
2746{
Alex Williamsond17b5282010-06-25 11:08:38 -06002747 RAMBlock *block;
2748 ram_addr_t last = 0;
2749
2750 QLIST_FOREACH(block, &ram_list.blocks, next)
2751 last = MAX(last, block->offset + block->length);
2752
2753 return last;
2754}
2755
Avi Kivityc5705a72011-12-20 15:59:12 +02002756void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002757{
2758 RAMBlock *new_block, *block;
2759
Avi Kivityc5705a72011-12-20 15:59:12 +02002760 new_block = NULL;
2761 QLIST_FOREACH(block, &ram_list.blocks, next) {
2762 if (block->offset == addr) {
2763 new_block = block;
2764 break;
2765 }
2766 }
2767 assert(new_block);
2768 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002769
2770 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2771 char *id = dev->parent_bus->info->get_dev_path(dev);
2772 if (id) {
2773 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002774 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002775 }
2776 }
2777 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2778
2779 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002780 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002781 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2782 new_block->idstr);
2783 abort();
2784 }
2785 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002786}
2787
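/* The resulting idstr is "<device path>/<name>" when a device with a
   bus path is given, e.g. a hypothetical "0000:00:02.0/vga.vram", and
   the bare name otherwise, e.g. "pc.ram"; RAM migration matches blocks
   between source and destination by this string. */
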
2788ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2789 MemoryRegion *mr)
2790{
2791 RAMBlock *new_block;
2792
2793 size = TARGET_PAGE_ALIGN(size);
2794 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002795
Jun Nakajima432d2682010-08-31 16:41:25 +01002796 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002797 if (host) {
2798 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002799 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002800 } else {
2801 if (mem_path) {
2802#if defined (__linux__) && !defined(TARGET_S390X)
2803 new_block->host = file_ram_alloc(new_block, size, mem_path);
2804 if (!new_block->host) {
2805 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002806 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002807 }
2808#else
2809 fprintf(stderr, "-mem-path option unsupported\n");
2810 exit(1);
2811#endif
2812 } else {
2813#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002814 /* S390 KVM requires the topmost vma of the RAM to be smaller than
 2815 a system-defined value, which is at least 256GB. Larger systems
2816 have larger values. We put the guest between the end of data
2817 segment (system break) and this value. We use 32GB as a base to
2818 have enough room for the system break to grow. */
2819 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002820 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002821 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002822 if (new_block->host == MAP_FAILED) {
2823 fprintf(stderr, "Allocating RAM failed\n");
2824 abort();
2825 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002826#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002827 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002828 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002829 } else {
2830 new_block->host = qemu_vmalloc(size);
2831 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002832#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002833 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002834 }
2835 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002836 new_block->length = size;
2837
2838 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2839
Anthony Liguori7267c092011-08-20 22:09:37 -05002840 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002841 last_ram_offset() >> TARGET_PAGE_BITS);
2842 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2843 0xff, size >> TARGET_PAGE_BITS);
2844
2845 if (kvm_enabled())
2846 kvm_setup_guest_memory(new_block->host, size);
2847
2848 return new_block->offset;
2849}
2850
Avi Kivityc5705a72011-12-20 15:59:12 +02002851ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002852{
Avi Kivityc5705a72011-12-20 15:59:12 +02002853 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002854}
bellarde9a1ab12007-02-08 23:08:38 +00002855
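/* Sketch (names illustrative): a board model typically pairs
 * qemu_ram_alloc() with a physical-memory registration so the returned
 * ram_addr_t backs a guest-physical range; IO_MEM_RAM is 0 in this era,
 * so the plain offset marks the pages as ordinary RAM. */
#if 0
static void demo_map_ram(MemoryRegion *demo_mr, target_phys_addr_t base,
                         ram_addr_t size)
{
    ram_addr_t offset = qemu_ram_alloc(size, demo_mr);
    cpu_register_physical_memory_log(base, size, offset | IO_MEM_RAM,
                                     0, false);
}
#endif
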
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002856void qemu_ram_free_from_ptr(ram_addr_t addr)
2857{
2858 RAMBlock *block;
2859
2860 QLIST_FOREACH(block, &ram_list.blocks, next) {
2861 if (addr == block->offset) {
2862 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002863 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002864 return;
2865 }
2866 }
2867}
2868
Anthony Liguoric227f092009-10-01 16:12:16 -05002869void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002870{
Alex Williamson04b16652010-07-02 11:13:17 -06002871 RAMBlock *block;
2872
2873 QLIST_FOREACH(block, &ram_list.blocks, next) {
2874 if (addr == block->offset) {
2875 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002876 if (block->flags & RAM_PREALLOC_MASK) {
2877 ;
2878 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002879#if defined (__linux__) && !defined(TARGET_S390X)
2880 if (block->fd) {
2881 munmap(block->host, block->length);
2882 close(block->fd);
2883 } else {
2884 qemu_vfree(block->host);
2885 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002886#else
2887 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002888#endif
2889 } else {
2890#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2891 munmap(block->host, block->length);
2892#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002893 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002894 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002895 } else {
2896 qemu_vfree(block->host);
2897 }
Alex Williamson04b16652010-07-02 11:13:17 -06002898#endif
2899 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002900 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002901 return;
2902 }
2903 }
2904
bellarde9a1ab12007-02-08 23:08:38 +00002905}
2906
Huang Yingcd19cfa2011-03-02 08:56:19 +01002907#ifndef _WIN32
2908void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2909{
2910 RAMBlock *block;
2911 ram_addr_t offset;
2912 int flags;
2913 void *area, *vaddr;
2914
2915 QLIST_FOREACH(block, &ram_list.blocks, next) {
2916 offset = addr - block->offset;
2917 if (offset < block->length) {
2918 vaddr = block->host + offset;
2919 if (block->flags & RAM_PREALLOC_MASK) {
2920 ;
2921 } else {
2922 flags = MAP_FIXED;
2923 munmap(vaddr, length);
2924 if (mem_path) {
2925#if defined(__linux__) && !defined(TARGET_S390X)
2926 if (block->fd) {
2927#ifdef MAP_POPULATE
2928 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2929 MAP_PRIVATE;
2930#else
2931 flags |= MAP_PRIVATE;
2932#endif
2933 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2934 flags, block->fd, offset);
2935 } else {
2936 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2937 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2938 flags, -1, 0);
2939 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002940#else
2941 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01002942#endif
2943 } else {
2944#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2945 flags |= MAP_SHARED | MAP_ANONYMOUS;
2946 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2947 flags, -1, 0);
2948#else
2949 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2950 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2951 flags, -1, 0);
2952#endif
2953 }
2954 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002955 fprintf(stderr, "Could not remap addr: "
2956 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01002957 length, addr);
2958 exit(1);
2959 }
2960 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2961 }
2962 return;
2963 }
2964 }
2965}
2966#endif /* !_WIN32 */
2967
pbrookdc828ca2009-04-09 22:21:07 +00002968/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002969 With the exception of the softmmu code in this file, this should
2970 only be used for local memory (e.g. video ram) that the device owns,
2971 and knows it isn't going to access beyond the end of the block.
2972
2973 It should not be used for general purpose DMA.
2974 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2975 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002976void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002977{
pbrook94a6b542009-04-11 17:15:54 +00002978 RAMBlock *block;
2979
Alex Williamsonf471a172010-06-11 11:11:42 -06002980 QLIST_FOREACH(block, &ram_list.blocks, next) {
2981 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05002982 /* Move this entry to the start of the list. */
2983 if (block != QLIST_FIRST(&ram_list.blocks)) {
2984 QLIST_REMOVE(block, next);
2985 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2986 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002987 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002988 /* We need to check if the requested address is in the RAM
2989 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002990 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002991 */
2992 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002993 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002994 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002995 block->host =
2996 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002997 }
2998 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002999 return block->host + (addr - block->offset);
3000 }
pbrook94a6b542009-04-11 17:15:54 +00003001 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003002
3003 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3004 abort();
3005
3006 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003007}
3008
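/* Sketch of the intended "local memory" use, e.g. a display device
 * clearing its own video RAM; qemu_put_ram_ptr() pairs the access
 * (below it only emits a trace event). Names are illustrative. */
#if 0
static void demo_clear_vram(ram_addr_t demo_vram_offset, size_t len)
{
    uint8_t *p = qemu_get_ram_ptr(demo_vram_offset);
    memset(p, 0, len);
    qemu_put_ram_ptr(p);
}
#endif
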
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003009/* Return a host pointer to ram allocated with qemu_ram_alloc.
3010 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3011 */
3012void *qemu_safe_ram_ptr(ram_addr_t addr)
3013{
3014 RAMBlock *block;
3015
3016 QLIST_FOREACH(block, &ram_list.blocks, next) {
3017 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003018 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003019 /* We need to check if the requested address is in the RAM
3020 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003021 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003022 */
3023 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003024 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003025 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003026 block->host =
3027 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003028 }
3029 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003030 return block->host + (addr - block->offset);
3031 }
3032 }
3033
3034 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3035 abort();
3036
3037 return NULL;
3038}
3039
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003040/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3041 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003042void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003043{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003044 if (*size == 0) {
3045 return NULL;
3046 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003047 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003048 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003049 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003050 RAMBlock *block;
3051
3052 QLIST_FOREACH(block, &ram_list.blocks, next) {
3053 if (addr - block->offset < block->length) {
3054 if (addr - block->offset + *size > block->length)
3055 *size = block->length - addr + block->offset;
3056 return block->host + (addr - block->offset);
3057 }
3058 }
3059
3060 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3061 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003062 }
3063}
3064
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003065void qemu_put_ram_ptr(void *addr)
3066{
3067 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003068}
3069
Marcelo Tosattie8902612010-10-11 15:31:19 -03003070int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003071{
pbrook94a6b542009-04-11 17:15:54 +00003072 RAMBlock *block;
3073 uint8_t *host = ptr;
3074
Jan Kiszka868bb332011-06-21 22:59:09 +02003075 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003076 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003077 return 0;
3078 }
3079
Alex Williamsonf471a172010-06-11 11:11:42 -06003080 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003081 /* This case appears when the block is not mapped. */
3082 if (block->host == NULL) {
3083 continue;
3084 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003085 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003086 *ram_addr = block->offset + (host - block->host);
3087 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003088 }
pbrook94a6b542009-04-11 17:15:54 +00003089 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003090
Marcelo Tosattie8902612010-10-11 15:31:19 -03003091 return -1;
3092}
Alex Williamsonf471a172010-06-11 11:11:42 -06003093
Marcelo Tosattie8902612010-10-11 15:31:19 -03003094/* Some of the softmmu routines need to translate from a host pointer
3095 (typically a TLB entry) back to a ram offset. */
3096ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3097{
3098 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003099
Marcelo Tosattie8902612010-10-11 15:31:19 -03003100 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3101 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3102 abort();
3103 }
3104 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003105}
3106
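/* Round-trip sketch: for a host-mapped block, translating a ram_addr_t
 * to a host pointer and back is the identity. */
#if 0
static void demo_roundtrip(ram_addr_t demo_addr)
{
    void *host = qemu_get_ram_ptr(demo_addr);
    assert(qemu_ram_addr_from_host_nofail(host) == demo_addr);
}
#endif
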
Anthony Liguoric227f092009-10-01 16:12:16 -05003107static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
bellard33417e72003-08-10 21:47:01 +00003108{
pbrook67d3b952006-12-18 05:03:52 +00003109#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003110 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003111#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003112#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003113 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
blueswir1e18231a2008-10-06 18:46:28 +00003114#endif
3115 return 0;
3116}
3117
Anthony Liguoric227f092009-10-01 16:12:16 -05003118static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00003119{
3120#ifdef DEBUG_UNASSIGNED
3121 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3122#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003123#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003124 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
blueswir1e18231a2008-10-06 18:46:28 +00003125#endif
3126 return 0;
3127}
3128
Anthony Liguoric227f092009-10-01 16:12:16 -05003129static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00003130{
3131#ifdef DEBUG_UNASSIGNED
3132 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3133#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003134#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003135 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00003136#endif
bellard33417e72003-08-10 21:47:01 +00003137 return 0;
3138}
3139
Anthony Liguoric227f092009-10-01 16:12:16 -05003140static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
bellard33417e72003-08-10 21:47:01 +00003141{
pbrook67d3b952006-12-18 05:03:52 +00003142#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003143 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
pbrook67d3b952006-12-18 05:03:52 +00003144#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003145#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003146 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
blueswir1e18231a2008-10-06 18:46:28 +00003147#endif
3148}
3149
Anthony Liguoric227f092009-10-01 16:12:16 -05003150static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00003151{
3152#ifdef DEBUG_UNASSIGNED
3153 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3154#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003155#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003156 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
blueswir1e18231a2008-10-06 18:46:28 +00003157#endif
3158}
3159
Anthony Liguoric227f092009-10-01 16:12:16 -05003160static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00003161{
3162#ifdef DEBUG_UNASSIGNED
3163 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3164#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003165#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003166 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00003167#endif
bellard33417e72003-08-10 21:47:01 +00003168}
3169
Blue Swirld60efc62009-08-25 18:29:31 +00003170static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
bellard33417e72003-08-10 21:47:01 +00003171 unassigned_mem_readb,
blueswir1e18231a2008-10-06 18:46:28 +00003172 unassigned_mem_readw,
3173 unassigned_mem_readl,
bellard33417e72003-08-10 21:47:01 +00003174};
3175
Blue Swirld60efc62009-08-25 18:29:31 +00003176static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
bellard33417e72003-08-10 21:47:01 +00003177 unassigned_mem_writeb,
blueswir1e18231a2008-10-06 18:46:28 +00003178 unassigned_mem_writew,
3179 unassigned_mem_writel,
bellard33417e72003-08-10 21:47:01 +00003180};
3181
Anthony Liguoric227f092009-10-01 16:12:16 -05003182static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00003183 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00003184{
bellard3a7d9292005-08-21 09:26:42 +00003185 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003186 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003187 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3188#if !defined(CONFIG_USER_ONLY)
3189 tb_invalidate_phys_page_fast(ram_addr, 1);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003190 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003191#endif
3192 }
pbrook5579c7f2009-04-11 14:47:08 +00003193 stb_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00003194 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003195 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003196 /* we remove the notdirty callback only if the code has been
3197 flushed */
3198 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003199 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003200}
3201
Anthony Liguoric227f092009-10-01 16:12:16 -05003202static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00003203 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00003204{
bellard3a7d9292005-08-21 09:26:42 +00003205 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003206 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003207 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3208#if !defined(CONFIG_USER_ONLY)
3209 tb_invalidate_phys_page_fast(ram_addr, 2);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003210 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003211#endif
3212 }
pbrook5579c7f2009-04-11 14:47:08 +00003213 stw_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00003214 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003215 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003216 /* we remove the notdirty callback only if the code has been
3217 flushed */
3218 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003219 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003220}
3221
Anthony Liguoric227f092009-10-01 16:12:16 -05003222static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00003223 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00003224{
bellard3a7d9292005-08-21 09:26:42 +00003225 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003226 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003227 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3228#if !defined(CONFIG_USER_ONLY)
3229 tb_invalidate_phys_page_fast(ram_addr, 4);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003230 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003231#endif
3232 }
pbrook5579c7f2009-04-11 14:47:08 +00003233 stl_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00003234 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003235 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003236 /* we remove the notdirty callback only if the code has been
3237 flushed */
3238 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003239 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003240}
3241
Blue Swirld60efc62009-08-25 18:29:31 +00003242static CPUReadMemoryFunc * const error_mem_read[3] = {
bellard3a7d9292005-08-21 09:26:42 +00003243 NULL, /* never used */
3244 NULL, /* never used */
3245 NULL, /* never used */
3246};
3247
Blue Swirld60efc62009-08-25 18:29:31 +00003248static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
bellard1ccde1c2004-02-06 19:46:14 +00003249 notdirty_mem_writeb,
3250 notdirty_mem_writew,
3251 notdirty_mem_writel,
3252};
3253
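/* Flow of the three writers above: a store reaches IO_MEM_NOTDIRTY only
   while the page's dirty flags are not all set. If translated code may
   exist there (CODE_DIRTY_FLAG clear), the affected TBs are invalidated
   first; the store is then applied to the RAM backing and the dirty
   flags raised. Once the code has been flushed (dirty_flags == 0xff),
   tlb_set_dirty() re-enables the fast RAM path for later stores. */
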
pbrook0f459d12008-06-09 00:20:13 +00003254/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003255static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003256{
3257 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003258 target_ulong pc, cs_base;
3259 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003260 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003261 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003262 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003263
aliguori06d55cc2008-11-18 20:24:06 +00003264 if (env->watchpoint_hit) {
3265 /* We re-entered the check after replacing the TB. Now raise
 3266 * the debug interrupt so that it will trigger after the
3267 * current instruction. */
3268 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3269 return;
3270 }
pbrook2e70f6e2008-06-29 01:03:05 +00003271 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003272 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003273 if ((vaddr == (wp->vaddr & len_mask) ||
3274 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003275 wp->flags |= BP_WATCHPOINT_HIT;
3276 if (!env->watchpoint_hit) {
3277 env->watchpoint_hit = wp;
3278 tb = tb_find_pc(env->mem_io_pc);
3279 if (!tb) {
3280 cpu_abort(env, "check_watchpoint: could not find TB for "
3281 "pc=%p", (void *)env->mem_io_pc);
3282 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003283 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003284 tb_phys_invalidate(tb, -1);
3285 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3286 env->exception_index = EXCP_DEBUG;
3287 } else {
3288 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3289 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3290 }
3291 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003292 }
aliguori6e140f22008-11-18 20:37:55 +00003293 } else {
3294 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003295 }
3296 }
3297}
3298
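/* Sketch of the producer side: cpu_watchpoint_insert() (defined earlier
 * in this file) arms the watchpoint, and the TLB then routes accesses
 * to the watched page through the watch_mem_* handlers below. The exact
 * call shown is illustrative and assumes this era's signature. */
#if 0
static void demo_arm_write_watchpoint(CPUState *env, target_ulong vaddr)
{
    cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, NULL);
}
#endif
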
pbrook6658ffb2007-03-16 23:58:11 +00003299/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3300 so these check for a hit then pass through to the normal out-of-line
3301 phys routines. */
Anthony Liguoric227f092009-10-01 16:12:16 -05003302static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003303{
aliguorib4051332008-11-18 20:14:20 +00003304 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003305 return ldub_phys(addr);
3306}
3307
Anthony Liguoric227f092009-10-01 16:12:16 -05003308static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003309{
aliguorib4051332008-11-18 20:14:20 +00003310 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003311 return lduw_phys(addr);
3312}
3313
Anthony Liguoric227f092009-10-01 16:12:16 -05003314static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003315{
aliguorib4051332008-11-18 20:14:20 +00003316 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003317 return ldl_phys(addr);
3318}
3319
Anthony Liguoric227f092009-10-01 16:12:16 -05003320static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003321 uint32_t val)
3322{
aliguorib4051332008-11-18 20:14:20 +00003323 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003324 stb_phys(addr, val);
3325}
3326
Anthony Liguoric227f092009-10-01 16:12:16 -05003327static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003328 uint32_t val)
3329{
aliguorib4051332008-11-18 20:14:20 +00003330 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003331 stw_phys(addr, val);
3332}
3333
Anthony Liguoric227f092009-10-01 16:12:16 -05003334static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003335 uint32_t val)
3336{
aliguorib4051332008-11-18 20:14:20 +00003337 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003338 stl_phys(addr, val);
3339}
3340
Blue Swirld60efc62009-08-25 18:29:31 +00003341static CPUReadMemoryFunc * const watch_mem_read[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00003342 watch_mem_readb,
3343 watch_mem_readw,
3344 watch_mem_readl,
3345};
3346
Blue Swirld60efc62009-08-25 18:29:31 +00003347static CPUWriteMemoryFunc * const watch_mem_write[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00003348 watch_mem_writeb,
3349 watch_mem_writew,
3350 watch_mem_writel,
3351};
pbrook6658ffb2007-03-16 23:58:11 +00003352
Richard Hendersonf6405242010-04-22 16:47:31 -07003353static inline uint32_t subpage_readlen (subpage_t *mmio,
3354 target_phys_addr_t addr,
3355 unsigned int len)
blueswir1db7b5422007-05-26 17:36:03 +00003356{
Richard Hendersonf6405242010-04-22 16:47:31 -07003357 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003358#if defined(DEBUG_SUBPAGE)
3359 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3360 mmio, len, addr, idx);
3361#endif
blueswir1db7b5422007-05-26 17:36:03 +00003362
Richard Hendersonf6405242010-04-22 16:47:31 -07003363 addr += mmio->region_offset[idx];
3364 idx = mmio->sub_io_index[idx];
3365 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
blueswir1db7b5422007-05-26 17:36:03 +00003366}
3367
Anthony Liguoric227f092009-10-01 16:12:16 -05003368static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
Richard Hendersonf6405242010-04-22 16:47:31 -07003369 uint32_t value, unsigned int len)
blueswir1db7b5422007-05-26 17:36:03 +00003370{
Richard Hendersonf6405242010-04-22 16:47:31 -07003371 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003372#if defined(DEBUG_SUBPAGE)
Richard Hendersonf6405242010-04-22 16:47:31 -07003373 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3374 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003375#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003376
3377 addr += mmio->region_offset[idx];
3378 idx = mmio->sub_io_index[idx];
3379 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00003380}
3381
Anthony Liguoric227f092009-10-01 16:12:16 -05003382static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003383{
blueswir1db7b5422007-05-26 17:36:03 +00003384 return subpage_readlen(opaque, addr, 0);
3385}
3386
Anthony Liguoric227f092009-10-01 16:12:16 -05003387static void subpage_writeb (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00003388 uint32_t value)
3389{
blueswir1db7b5422007-05-26 17:36:03 +00003390 subpage_writelen(opaque, addr, value, 0);
3391}
3392
Anthony Liguoric227f092009-10-01 16:12:16 -05003393static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003394{
blueswir1db7b5422007-05-26 17:36:03 +00003395 return subpage_readlen(opaque, addr, 1);
3396}
3397
Anthony Liguoric227f092009-10-01 16:12:16 -05003398static void subpage_writew (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00003399 uint32_t value)
3400{
blueswir1db7b5422007-05-26 17:36:03 +00003401 subpage_writelen(opaque, addr, value, 1);
3402}
3403
Anthony Liguoric227f092009-10-01 16:12:16 -05003404static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003405{
blueswir1db7b5422007-05-26 17:36:03 +00003406 return subpage_readlen(opaque, addr, 2);
3407}
3408
Richard Hendersonf6405242010-04-22 16:47:31 -07003409static void subpage_writel (void *opaque, target_phys_addr_t addr,
3410 uint32_t value)
blueswir1db7b5422007-05-26 17:36:03 +00003411{
blueswir1db7b5422007-05-26 17:36:03 +00003412 subpage_writelen(opaque, addr, value, 2);
3413}
3414
Blue Swirld60efc62009-08-25 18:29:31 +00003415static CPUReadMemoryFunc * const subpage_read[] = {
blueswir1db7b5422007-05-26 17:36:03 +00003416 &subpage_readb,
3417 &subpage_readw,
3418 &subpage_readl,
3419};
3420
Blue Swirld60efc62009-08-25 18:29:31 +00003421static CPUWriteMemoryFunc * const subpage_write[] = {
blueswir1db7b5422007-05-26 17:36:03 +00003422 &subpage_writeb,
3423 &subpage_writew,
3424 &subpage_writel,
3425};
3426
Andreas Färber56384e82011-11-30 16:26:21 +01003427static uint32_t subpage_ram_readb(void *opaque, target_phys_addr_t addr)
3428{
3429 ram_addr_t raddr = addr;
3430 void *ptr = qemu_get_ram_ptr(raddr);
3431 return ldub_p(ptr);
3432}
3433
3434static void subpage_ram_writeb(void *opaque, target_phys_addr_t addr,
3435 uint32_t value)
3436{
3437 ram_addr_t raddr = addr;
3438 void *ptr = qemu_get_ram_ptr(raddr);
3439 stb_p(ptr, value);
3440}
3441
3442static uint32_t subpage_ram_readw(void *opaque, target_phys_addr_t addr)
3443{
3444 ram_addr_t raddr = addr;
3445 void *ptr = qemu_get_ram_ptr(raddr);
3446 return lduw_p(ptr);
3447}
3448
3449static void subpage_ram_writew(void *opaque, target_phys_addr_t addr,
3450 uint32_t value)
3451{
3452 ram_addr_t raddr = addr;
3453 void *ptr = qemu_get_ram_ptr(raddr);
3454 stw_p(ptr, value);
3455}
3456
3457static uint32_t subpage_ram_readl(void *opaque, target_phys_addr_t addr)
3458{
3459 ram_addr_t raddr = addr;
3460 void *ptr = qemu_get_ram_ptr(raddr);
3461 return ldl_p(ptr);
3462}
3463
3464static void subpage_ram_writel(void *opaque, target_phys_addr_t addr,
3465 uint32_t value)
3466{
3467 ram_addr_t raddr = addr;
3468 void *ptr = qemu_get_ram_ptr(raddr);
3469 stl_p(ptr, value);
3470}
3471
3472static CPUReadMemoryFunc * const subpage_ram_read[] = {
3473 &subpage_ram_readb,
3474 &subpage_ram_readw,
3475 &subpage_ram_readl,
3476};
3477
3478static CPUWriteMemoryFunc * const subpage_ram_write[] = {
3479 &subpage_ram_writeb,
3480 &subpage_ram_writew,
3481 &subpage_ram_writel,
3482};
3483
Anthony Liguoric227f092009-10-01 16:12:16 -05003484static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3485 ram_addr_t memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003486{
3487 int idx, eidx;
3488
3489 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3490 return -1;
3491 idx = SUBPAGE_IDX(start);
3492 eidx = SUBPAGE_IDX(end);
3493#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003494 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem " RAM_ADDR_FMT "\n",
blueswir1db7b5422007-05-26 17:36:03 +00003495 __func__, mmio, start, end, idx, eidx, memory);
3496#endif
Andreas Färber56384e82011-11-30 16:26:21 +01003497 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
3498 memory = IO_MEM_SUBPAGE_RAM;
3499 }
Richard Hendersonf6405242010-04-22 16:47:31 -07003500 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
blueswir1db7b5422007-05-26 17:36:03 +00003501 for (; idx <= eidx; idx++) {
Richard Hendersonf6405242010-04-22 16:47:31 -07003502 mmio->sub_io_index[idx] = memory;
3503 mmio->region_offset[idx] = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00003504 }
3505
3506 return 0;
3507}
3508
Richard Hendersonf6405242010-04-22 16:47:31 -07003509static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3510 ram_addr_t orig_memory,
3511 ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003512{
Anthony Liguoric227f092009-10-01 16:12:16 -05003513 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003514 int subpage_memory;
3515
Anthony Liguori7267c092011-08-20 22:09:37 -05003516 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003517
3518 mmio->base = base;
Alexander Graf2507c122010-12-08 12:05:37 +01003519 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3520 DEVICE_NATIVE_ENDIAN);
blueswir1db7b5422007-05-26 17:36:03 +00003521#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003522 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3523 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
blueswir1db7b5422007-05-26 17:36:03 +00003524#endif
aliguori1eec6142009-02-05 22:06:18 +00003525 *phys = subpage_memory | IO_MEM_SUBPAGE;
Richard Hendersonf6405242010-04-22 16:47:31 -07003526 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00003527
3528 return mmio;
3529}
3530
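/* Example (illustrative, 4 KiB pages): when a device claims only bytes
   0x800..0xfff of a page that is otherwise RAM, subpage_init() first
   covers the whole page with the original handler, then
   subpage_register() redirects indexes 0x800..0xfff to the device's io
   functions; RAM indexes are served through the subpage_ram_* handlers
   above, and each index keeps its own region_offset. */
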
aliguori88715652009-02-11 15:20:58 +00003531static int get_free_io_mem_idx(void)
3532{
3533 int i;
3534
3535 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3536 if (!io_mem_used[i]) {
3537 io_mem_used[i] = 1;
3538 return i;
3539 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003540 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003541 return -1;
3542}
3543
Alexander Grafdd310532010-12-08 12:05:36 +01003544/*
3545 * Usually, devices operate in little endian mode. There are devices out
3546 * there that operate in big endian too. Each device gets byte swapped
3547 * mmio if plugged onto a CPU that does the other endianness.
3548 *
3549 * CPU Device swap?
3550 *
3551 * little little no
3552 * little big yes
3553 * big little yes
3554 * big big no
3555 */
3556
3557typedef struct SwapEndianContainer {
3558 CPUReadMemoryFunc *read[3];
3559 CPUWriteMemoryFunc *write[3];
3560 void *opaque;
3561} SwapEndianContainer;
3562
3563static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3564{
3565 uint32_t val;
3566 SwapEndianContainer *c = opaque;
3567 val = c->read[0](c->opaque, addr);
3568 return val;
3569}
3570
3571static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3572{
3573 uint32_t val;
3574 SwapEndianContainer *c = opaque;
3575 val = bswap16(c->read[1](c->opaque, addr));
3576 return val;
3577}
3578
3579static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3580{
3581 uint32_t val;
3582 SwapEndianContainer *c = opaque;
3583 val = bswap32(c->read[2](c->opaque, addr));
3584 return val;
3585}
3586
3587static CPUReadMemoryFunc * const swapendian_readfn[3]={
3588 swapendian_mem_readb,
3589 swapendian_mem_readw,
3590 swapendian_mem_readl
3591};
3592
3593static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3594 uint32_t val)
3595{
3596 SwapEndianContainer *c = opaque;
3597 c->write[0](c->opaque, addr, val);
3598}
3599
3600static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3601 uint32_t val)
3602{
3603 SwapEndianContainer *c = opaque;
3604 c->write[1](c->opaque, addr, bswap16(val));
3605}
3606
3607static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3608 uint32_t val)
3609{
3610 SwapEndianContainer *c = opaque;
3611 c->write[2](c->opaque, addr, bswap32(val));
3612}
3613
3614static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3615 swapendian_mem_writeb,
3616 swapendian_mem_writew,
3617 swapendian_mem_writel
3618};
3619
3620static void swapendian_init(int io_index)
3621{
Anthony Liguori7267c092011-08-20 22:09:37 -05003622 SwapEndianContainer *c = g_malloc(sizeof(SwapEndianContainer));
Alexander Grafdd310532010-12-08 12:05:36 +01003623 int i;
3624
3625 /* Swap mmio for big endian targets */
3626 c->opaque = io_mem_opaque[io_index];
3627 for (i = 0; i < 3; i++) {
3628 c->read[i] = io_mem_read[io_index][i];
3629 c->write[i] = io_mem_write[io_index][i];
3630
3631 io_mem_read[io_index][i] = swapendian_readfn[i];
3632 io_mem_write[io_index][i] = swapendian_writefn[i];
3633 }
3634 io_mem_opaque[io_index] = c;
3635}
3636
3637static void swapendian_del(int io_index)
3638{
3639 if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
Anthony Liguori7267c092011-08-20 22:09:37 -05003640 g_free(io_mem_opaque[io_index]);
Alexander Grafdd310532010-12-08 12:05:36 +01003641 }
3642}
3643
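/* Sketch: a device with inherently big-endian registers passes
 * DEVICE_BIG_ENDIAN and keeps thinking in its own byte order; on a
 * little-endian target the container above swaps transparently, and on
 * a big-endian target no wrapper is installed at all. */
#if 0
static int demo_register_be_mmio(CPUReadMemoryFunc * const *demo_read,
                                 CPUWriteMemoryFunc * const *demo_write,
                                 void *demo_opaque)
{
    return cpu_register_io_memory(demo_read, demo_write, demo_opaque,
                                  DEVICE_BIG_ENDIAN);
}
#endif
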
bellard33417e72003-08-10 21:47:01 +00003644/* mem_read and mem_write are arrays of function pointers for byte
 3645 (index 0), word (index 1) and dword (index 2) accesses. Entries
Paul Brook0b4e6e32009-04-30 18:37:55 +01003646 can be omitted with a NULL function pointer.
blueswir13ee89922008-01-02 19:45:26 +00003647 If io_index is non-zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00003648 modified. If it is zero, a new io zone is allocated. The return
 3649 value can be used with cpu_register_physical_memory(). (-1) is
 3650 returned on error. */
Avi Kivity1eed09c2009-06-14 11:38:51 +03003651static int cpu_register_io_memory_fixed(int io_index,
Blue Swirld60efc62009-08-25 18:29:31 +00003652 CPUReadMemoryFunc * const *mem_read,
3653 CPUWriteMemoryFunc * const *mem_write,
Alexander Grafdd310532010-12-08 12:05:36 +01003654 void *opaque, enum device_endian endian)
bellard33417e72003-08-10 21:47:01 +00003655{
Richard Henderson3cab7212010-05-07 09:52:51 -07003656 int i;
3657
bellard33417e72003-08-10 21:47:01 +00003658 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003659 io_index = get_free_io_mem_idx();
3660 if (io_index == -1)
3661 return io_index;
bellard33417e72003-08-10 21:47:01 +00003662 } else {
Avi Kivity1eed09c2009-06-14 11:38:51 +03003663 io_index >>= IO_MEM_SHIFT;
bellard33417e72003-08-10 21:47:01 +00003664 if (io_index >= IO_MEM_NB_ENTRIES)
3665 return -1;
3666 }
bellardb5ff1b32005-11-26 10:38:39 +00003667
Richard Henderson3cab7212010-05-07 09:52:51 -07003668 for (i = 0; i < 3; ++i) {
3669 io_mem_read[io_index][i]
3670 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3671 }
3672 for (i = 0; i < 3; ++i) {
3673 io_mem_write[io_index][i]
3674 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3675 }
bellarda4193c82004-06-03 14:01:43 +00003676 io_mem_opaque[io_index] = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003677
Alexander Grafdd310532010-12-08 12:05:36 +01003678 switch (endian) {
3679 case DEVICE_BIG_ENDIAN:
3680#ifndef TARGET_WORDS_BIGENDIAN
3681 swapendian_init(io_index);
3682#endif
3683 break;
3684 case DEVICE_LITTLE_ENDIAN:
3685#ifdef TARGET_WORDS_BIGENDIAN
3686 swapendian_init(io_index);
3687#endif
3688 break;
3689 case DEVICE_NATIVE_ENDIAN:
3690 default:
3691 break;
3692 }
3693
Richard Hendersonf6405242010-04-22 16:47:31 -07003694 return (io_index << IO_MEM_SHIFT);
bellard33417e72003-08-10 21:47:01 +00003695}
bellard61382a52003-10-27 21:22:23 +00003696
Blue Swirld60efc62009-08-25 18:29:31 +00003697int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3698 CPUWriteMemoryFunc * const *mem_write,
Alexander Grafdd310532010-12-08 12:05:36 +01003699 void *opaque, enum device_endian endian)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003700{
Alexander Graf2507c122010-12-08 12:05:37 +01003701 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003702}
3703
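/* End-to-end sketch (all "demo" names hypothetical): a minimal MMIO
 * device exposing a page of 32-bit registers. Byte and word slots are
 * left NULL, so such accesses fall back to the unassigned handlers. */
#if 0
static uint32_t demo_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t *regs = opaque;
    return regs[addr >> 2];
}

static void demo_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    uint32_t *regs = opaque;
    regs[addr >> 2] = val;
}

static CPUReadMemoryFunc * const demo_read[3] = {
    NULL, NULL, demo_readl,
};

static CPUWriteMemoryFunc * const demo_write[3] = {
    NULL, NULL, demo_writel,
};

static void demo_init(uint32_t *regs, target_phys_addr_t base)
{
    int iomemtype = cpu_register_io_memory(demo_read, demo_write, regs,
                                           DEVICE_NATIVE_ENDIAN);
    cpu_register_physical_memory_log(base, TARGET_PAGE_SIZE, iomemtype,
                                     0, false);
}
#endif
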
aliguori88715652009-02-11 15:20:58 +00003704void cpu_unregister_io_memory(int io_table_address)
3705{
3706 int i;
3707 int io_index = io_table_address >> IO_MEM_SHIFT;
3708
Alexander Grafdd310532010-12-08 12:05:36 +01003709 swapendian_del(io_index);
3710
aliguori88715652009-02-11 15:20:58 +00003711 for (i=0;i < 3; i++) {
3712 io_mem_read[io_index][i] = unassigned_mem_read[i];
3713 io_mem_write[io_index][i] = unassigned_mem_write[i];
3714 }
3715 io_mem_opaque[io_index] = NULL;
3716 io_mem_used[io_index] = 0;
3717}
3718
Avi Kivitye9179ce2009-06-14 11:38:52 +03003719static void io_mem_init(void)
3720{
3721 int i;
3722
Alexander Graf2507c122010-12-08 12:05:37 +01003723 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3724 unassigned_mem_write, NULL,
3725 DEVICE_NATIVE_ENDIAN);
3726 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3727 unassigned_mem_write, NULL,
3728 DEVICE_NATIVE_ENDIAN);
3729 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3730 notdirty_mem_write, NULL,
3731 DEVICE_NATIVE_ENDIAN);
Andreas Färber56384e82011-11-30 16:26:21 +01003732 cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read,
3733 subpage_ram_write, NULL,
3734 DEVICE_NATIVE_ENDIAN);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003735 for (i=0; i<5; i++)
3736 io_mem_used[i] = 1;
3737
3738 io_mem_watch = cpu_register_io_memory(watch_mem_read,
Alexander Graf2507c122010-12-08 12:05:37 +01003739 watch_mem_write, NULL,
3740 DEVICE_NATIVE_ENDIAN);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003741}
3742
Avi Kivity62152b82011-07-26 14:26:14 +03003743static void memory_map_init(void)
3744{
Anthony Liguori7267c092011-08-20 22:09:37 -05003745 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003746 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003747 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003748
Anthony Liguori7267c092011-08-20 22:09:37 -05003749 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003750 memory_region_init(system_io, "io", 65536);
3751 set_system_io_map(system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003752}
3753
3754MemoryRegion *get_system_memory(void)
3755{
3756 return system_memory;
3757}
3758
Avi Kivity309cb472011-08-08 16:09:03 +03003759MemoryRegion *get_system_io(void)
3760{
3761 return system_io;
3762}
3763
pbrooke2eef172008-06-08 01:09:01 +00003764#endif /* !defined(CONFIG_USER_ONLY) */
3765
bellard13eb76e2004-01-24 15:23:36 +00003766/* physical memory access (slow version, mainly for debug) */
3767#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003768int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3769 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003770{
3771 int l, flags;
3772 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003773 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003774
3775 while (len > 0) {
3776 page = addr & TARGET_PAGE_MASK;
3777 l = (page + TARGET_PAGE_SIZE) - addr;
3778 if (l > len)
3779 l = len;
3780 flags = page_get_flags(page);
3781 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003782 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003783 if (is_write) {
3784 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003785 return -1;
bellard579a97f2007-11-11 14:26:47 +00003786 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003787 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003788 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003789 memcpy(p, buf, l);
3790 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003791 } else {
3792 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003793 return -1;
bellard579a97f2007-11-11 14:26:47 +00003794 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003795 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003796 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003797 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003798 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003799 }
3800 len -= l;
3801 buf += l;
3802 addr += l;
3803 }
Paul Brooka68fe892010-03-01 00:08:59 +00003804 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003805}
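
/* Typical caller (sketch): the gdb stub services a guest memory access
   with cpu_memory_rw_debug(env, addr, buf, len, 0) and reports an error
   to the debugger when -1 comes back for an unmapped or protected page. */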
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                ram_addr_t addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
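/* Illustrative sketch (disabled, not part of the original file): most
   callers go through the cpu_physical_memory_read()/write() wrappers
   around cpu_physical_memory_rw().  The guest physical address below is
   hypothetical. */
#if 0
static uint32_t example_read_guest_u32(target_phys_addr_t gpa)
{
    uint8_t buf[4];

    /* is_write == 0: copy 4 bytes out of guest memory (or a device). */
    cpu_physical_memory_read(gpa, buf, sizeof(buf));
    /* Interpret the bytes in guest byte order. */
    return ldl_p(buf);
}
#endif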
/* used for ROM loading: can write to RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
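/* Illustrative sketch (disabled, not part of the original file): a machine
   reset hook could use cpu_physical_memory_write_rom() to (re)load a
   firmware image, since a plain cpu_physical_memory_write() is discarded
   for ROM pages.  The address, blob and size are hypothetical. */
#if 0
static void example_load_firmware(const uint8_t *blob, int size)
{
    /* Copy the image into a ROM region mapped just below 4 GB. */
    cpu_physical_memory_write_rom(0xfffc0000, blob, size);
}
#endif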
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
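/* Illustrative sketch (disabled, not part of the original file): a caller
   whose cpu_physical_memory_map() attempt failed (e.g. because the single
   bounce buffer was busy) can register a map client; the callback fires
   from cpu_notify_map_clients() once the bounce buffer is freed, at which
   point retrying the map is likely to succeed.  example_start_dma() is a
   hypothetical helper. */
#if 0
static void example_map_retry_cb(void *opaque)
{
    /* The notifier unregisters the client right after the callback runs,
       so registration is one-shot: simply retry the transfer. */
    example_start_dma(opaque);
}

static void example_defer_dma(void *dma_state)
{
    cpu_register_map_client(dma_state, example_map_retry_cb);
}
#endif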
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
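/* Illustrative sketch (disabled, not part of the original file): the usual
   zero-copy DMA pattern built on the two functions above.  Because the map
   may cover less than the requested length, callers loop until the whole
   transfer is done.  The device-model names are hypothetical. */
#if 0
static int example_dma_write_to_guest(target_phys_addr_t gpa,
                                      const uint8_t *data,
                                      target_phys_addr_t size)
{
    while (size > 0) {
        target_phys_addr_t plen = size;
        void *host = cpu_physical_memory_map(gpa, &plen, 1 /* is_write */);

        if (!host || plen == 0) {
            return -1; /* retry later via cpu_register_map_client() */
        }
        memcpy(host, data, plen);
        /* access_len == plen: everything we mapped was written. */
        cpu_physical_memory_unmap(host, plen, 1, plen);
        gpa += plen;
        data += plen;
        size -= plen;
    }
    return 0;
}
#endif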
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
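/* Illustrative sketch (disabled, not part of the original file): the
   _le/_be variants let a device model read guest structures with a fixed
   byte order independent of the target's endianness.  The descriptor
   layout is hypothetical. */
#if 0
static uint32_t example_read_desc_len(target_phys_addr_t desc)
{
    /* The field at offset 4 is defined little-endian by the (hypothetical)
       device spec, so the same code works on big-endian targets too. */
    return ldl_le_phys(desc + 4);
}
#endif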
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        /* XXX: this is broken when the device endianness differs from the
           CPU endianness; fix it and honour the "endian" argument here. */
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned.  The RAM page is not marked dirty and the
   code inside it is not invalidated.  This is useful when the dirty bits
   are used to track modified PTEs. */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
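/* Illustrative sketch (disabled, not part of the original file): the
   _notdirty store suits target MMU emulation that writes accessed/dirty
   bits back into a guest page table entry as a side effect of a hardware
   page-table walk, without perturbing dirty tracking or invalidating TBs.
   The PTE layout and bit position below are hypothetical. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    /* Assume bit 5 is the hypothetical "accessed" flag. */
    pte |= (1u << 5);
    /* Update the PTE without marking the page dirty or flushing TBs. */
    stl_phys_notdirty(pte_addr, pte);
}
#endif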
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
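/* Illustrative sketch (disabled, not part of the original file): the store
   variants mirror the loads above; e.g. a device model completing a
   request could write a status word that its (hypothetical) spec defines
   as big-endian. */
#if 0
static void example_complete_request(target_phys_addr_t status_addr)
{
    /* 0x1 is a hypothetical "done" status, stored big-endian regardless
       of the target's native byte order. */
    stl_be_phys(status_addr, 0x1);
}
#endif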
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
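/* Illustrative sketch (disabled, not part of the original file): this is
   the entry point the gdb stub uses to inspect guest memory by virtual
   address; the MMU translation is done per page via
   cpu_get_phys_page_debug().  The helper name is hypothetical. */
#if 0
static void example_dump_guest_word(CPUState *env, target_ulong vaddr)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) == 0) {
        printf("0x" TARGET_FMT_lx ": 0x%08x\n", vaddr, ldl_p(buf));
    }
}
#endif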
/* in deterministic (icount) execution mode, instructions performing
   device I/O must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
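/* Typically reached through the human monitor's "info jit" command, which
   is the usual way to read these statistics. */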
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS
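/* The includes below expand softmmu_template.h once per access size
   (SHIFT 0..3 giving 1-, 2-, 4- and 8-byte accessors), generating the
   _cmmu variants used when the translator itself reads guest code. */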
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif