/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

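/* Buffer for the TCG prologue: the host code executed to enter
   generated code (initialized via tcg_prologue_init() in
   tcg_exec_init() below). */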
uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed. */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* to optimize self-modifying code handling, we count the number
       of write lookups on a given page before switching to a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

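/* Worked example of the sizing above (a sketch, for a hypothetical
   configuration): with 32-bit guest virtual addresses and 4 KiB pages
   (TARGET_PAGE_BITS = 12), 20 bits remain to be mapped.  Then
   V_L1_BITS_REM = 20 % 10 = 0, which is < 4, so V_L1_BITS = 10: a
   1024-entry L1 table pointing to 1024-entry leaf tables of PageDesc,
   with V_L1_SHIFT = 32 - 12 - 10 = 10. */
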
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
257
bellardb346ff42003-06-15 20:05:50 +0000258static void page_init(void)
bellard54936002003-05-13 00:25:15 +0000259{
bellard83fb7ad2004-07-05 21:25:26 +0000260 /* NOTE: we can always suppose that qemu_host_page_size >=
bellard54936002003-05-13 00:25:15 +0000261 TARGET_PAGE_SIZE */
aliguoric2b48b62008-11-11 22:06:42 +0000262#ifdef _WIN32
263 {
264 SYSTEM_INFO system_info;
265
266 GetSystemInfo(&system_info);
267 qemu_real_host_page_size = system_info.dwPageSize;
268 }
269#else
270 qemu_real_host_page_size = getpagesize();
271#endif
bellard83fb7ad2004-07-05 21:25:26 +0000272 if (qemu_host_page_size == 0)
273 qemu_host_page_size = qemu_real_host_page_size;
274 if (qemu_host_page_size < TARGET_PAGE_SIZE)
275 qemu_host_page_size = TARGET_PAGE_SIZE;
bellard83fb7ad2004-07-05 21:25:26 +0000276 qemu_host_page_mask = ~(qemu_host_page_size - 1);
balrog50a95692007-12-12 01:16:23 +0000277
Paul Brook2e9a5712010-05-05 16:32:59 +0100278#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
balrog50a95692007-12-12 01:16:23 +0000279 {
Juergen Lockf01576f2010-03-25 22:32:16 +0100280#ifdef HAVE_KINFO_GETVMMAP
281 struct kinfo_vmentry *freep;
282 int i, cnt;
283
284 freep = kinfo_getvmmap(getpid(), &cnt);
285 if (freep) {
286 mmap_lock();
287 for (i = 0; i < cnt; i++) {
288 unsigned long startaddr, endaddr;
289
290 startaddr = freep[i].kve_start;
291 endaddr = freep[i].kve_end;
292 if (h2g_valid(startaddr)) {
293 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
294
295 if (h2g_valid(endaddr)) {
296 endaddr = h2g(endaddr);
Aurelien Jarnofd436902010-04-10 17:20:36 +0200297 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
Juergen Lockf01576f2010-03-25 22:32:16 +0100298 } else {
299#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
300 endaddr = ~0ul;
Aurelien Jarnofd436902010-04-10 17:20:36 +0200301 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
Juergen Lockf01576f2010-03-25 22:32:16 +0100302#endif
303 }
304 }
305 }
306 free(freep);
307 mmap_unlock();
308 }
309#else
balrog50a95692007-12-12 01:16:23 +0000310 FILE *f;
balrog50a95692007-12-12 01:16:23 +0000311
pbrook07765902008-05-31 16:33:53 +0000312 last_brk = (unsigned long)sbrk(0);
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800313
Aurelien Jarnofd436902010-04-10 17:20:36 +0200314 f = fopen("/compat/linux/proc/self/maps", "r");
balrog50a95692007-12-12 01:16:23 +0000315 if (f) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800316 mmap_lock();
317
balrog50a95692007-12-12 01:16:23 +0000318 do {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800319 unsigned long startaddr, endaddr;
320 int n;
321
322 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
323
324 if (n == 2 && h2g_valid(startaddr)) {
325 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
326
327 if (h2g_valid(endaddr)) {
328 endaddr = h2g(endaddr);
329 } else {
330 endaddr = ~0ul;
331 }
332 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
balrog50a95692007-12-12 01:16:23 +0000333 }
334 } while (!feof(f));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800335
balrog50a95692007-12-12 01:16:23 +0000336 fclose(f);
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800337 mmap_unlock();
balrog50a95692007-12-12 01:16:23 +0000338 }
Juergen Lockf01576f2010-03-25 22:32:16 +0100339#endif
balrog50a95692007-12-12 01:16:23 +0000340 }
341#endif
bellard54936002003-05-13 00:25:15 +0000342}
343
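/* Look up the PageDesc for guest page 'index' in the multi-level
   l1_map, walking V_L1_SHIFT / L2_BITS intermediate levels; if 'alloc'
   is set, missing intermediate tables and the leaf PageDesc array are
   allocated on the way down. */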
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
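/* Same walk as page_find_alloc(), but over l1_phys_map, yielding the
   PhysPageDesc for a physical page index; newly allocated leaf
   entries are initialized to IO_MEM_UNASSIGNED. */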
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches. */
        /* We have a +- 4GB range on the branches; leave some slop. */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block.  Returns NULL (so that the caller
   can flush the translation buffer) if there are too many translation
   blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated. */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

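/* In the per-page TB lists and the jump lists below, the low two bits
   of each TranslationBlock pointer are a tag (hence the recurring
   "& 3" / "& ~3" masking): for page lists the tag is the index (0 or 1)
   of the page within the TB, for jump lists it is the jump slot, and a
   tag of 2 marks the head of a circular list. */
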
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can remove tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* remove this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* remove any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
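
/* For example, set_bits(tab, 3, 7) marks bits 3..9: the first byte of
   'tab' is OR-ed with 0xf8 (bits 3-7) and the second with 0x03
   (bits 8-9). */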

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
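
/* Example (hypothetical addresses): with 4 KiB pages, a TB translated
   from pc = 0x1ff0 with size = 0x20 ends at 0x200f, so virt_page2 is
   0x2000 while pc's page is 0x1000, and the TB gets linked into both
   pages; a TB wholly inside one page keeps phys_page2 == -1. */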

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end).  NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
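
/* For example (hypothetical values): a 4-byte write at page offset
   0x10 tests code_bitmap[2] against mask 0xf; if none of bits 16..19
   is set, no translated code covers those bytes and the slow
   invalidation path is skipped. */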

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non-writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

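/* Illustrative sketch (not part of qemu): tb_find_pc() works because
   tbs[] is ordered by tc_ptr -- translated code is carved sequentially
   out of code_gen_buffer -- so the loop above is a standard
   predecessor search: with no exact match, m_max converges on the last
   entry whose tc_ptr is <= the probe.  The same invariant on a plain
   array, with hypothetical names: */
#if 0
/* returns the index of the largest a[i] <= key; assumes a[0] <= key,
   which mirrors the code_gen_buffer bounds check above */
static int find_le(const unsigned long *a, int n, unsigned long key)
{
    int lo = 0, hi = n - 1, mid;

    while (lo <= hi) {
        mid = (lo + hi) >> 1;
        if (a[mid] == key)
            return mid;
        if (key < a[mid])
            hi = mid - 1;
        else
            lo = mid + 1;
    }
    return hi; /* like tb_find_pc() returning &tbs[m_max] */
}
#endif
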
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to the next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

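/* Hypothetical usage sketch (not part of qemu): a debugger front end
   would pair these calls roughly as follows.  The length must be a
   power of two (1/2/4/8) and the address aligned to it, or
   cpu_watchpoint_insert() returns -EINVAL: */
#if 0
static void example_watch(CPUState *env, target_ulong guest_addr)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, guest_addr, 4,
                              BP_GDB | BP_MEM_WRITE, &wp) == 0) {
        /* ... run the guest; a hit sets BP_WATCHPOINT_HIT ... */
        cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
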
/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

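/* Hypothetical usage sketch (not part of qemu): the breakpoint API
   mirrors the watchpoint one.  BP_GDB entries sit at the head of the
   list, so gdbstub-owned breakpoints take precedence at lookup time: */
#if 0
static void example_break(CPUState *env, target_ulong pc)
{
    CPUBreakpoint *bp;

    if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
        /* ... run until EXCP_DEBUG ... */
        cpu_breakpoint_remove_by_ref(env, bp);
    }
    /* or drop every GDB-injected breakpoint at once: */
    cpu_breakpoint_remove_all(env, BP_GDB);
}
#endif
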
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
       problem and hope the cpu will stop of its own accord. For userspace
       emulation this often isn't actually as bad as it sounds. Often
       signals are used primarily to interrupt blocking syscalls. */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset,
                                  bool log_dirty)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset, log_dirty);
    }
}

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}

struct last_map {
    target_phys_addr_t start_addr;
    ram_addr_t size;
    ram_addr_t phys_offset;
};

/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
 * address. Each intermediate table provides the next L2_BITs of guest
 * physical address space. The number of levels varies based on host and
 * guest configuration, making it efficient to build the final guest
 * physical address by seeding the L1 offset and shifting and adding in
 * each L2 offset as we recurse through them. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
                                 void **lp, target_phys_addr_t addr,
                                 struct last_map *map)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        addr <<= L2_BITS + TARGET_PAGE_BITS;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;

                if (map->size &&
                    start_addr == map->start_addr + map->size &&
                    pd[i].phys_offset == map->phys_offset + map->size) {

                    map->size += TARGET_PAGE_SIZE;
                    continue;
                } else if (map->size) {
                    client->set_memory(client, map->start_addr,
                                       map->size, map->phys_offset, false);
                }

                map->start_addr = start_addr;
                map->size = TARGET_PAGE_SIZE;
                map->phys_offset = pd[i].phys_offset;
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i,
                                 (addr << L2_BITS) | i, map);
        }
    }
}

static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    struct last_map map = { };

    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i, i, &map);
    }
    if (map.size) {
        client->set_memory(client, map.start_addr, map.size, map.phys_offset,
                           false);
    }
}

void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif

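/* Illustrative sketch (not part of qemu): phys_page_for_each_1()
   coalesces runs of pages that are contiguous in both guest physical
   address and phys_offset before notifying the client, so a large flat
   RAM region produces one set_memory() callback instead of one per
   page.  The merge test in isolation, with hypothetical types/names: */
#if 0
struct range { unsigned long start, size, offset; };

/* returns 1 if the page at (start, offset) extends r in place */
static int coalesce(struct range *r, unsigned long start,
                    unsigned long offset, unsigned long page_size)
{
    if (r->size && start == r->start + r->size
        && offset == r->offset + r->size) {
        r->size += page_size;
        return 1;
    }
    return 0; /* caller flushes r and starts a new range */
}
#endif
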
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma-separated list of log item names. Returns 0 on error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

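/* Hypothetical usage sketch (not part of qemu): this is essentially
   the shape of what the -d command line option does -- translate a
   name list into a mask, then enable logging: */
#if 0
static void example_set_log(const char *arg)   /* e.g. "in_asm,cpu" */
{
    int mask = cpu_str_to_log_mask(arg);

    if (mask == 0) {
        fprintf(stderr, "unknown log item in '%s'\n", arg);
        return;
    }
    cpu_set_log(mask);
}
#endif
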
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read = -1,
    .addr_write = -1,
    .addr_code = -1,
    .addend = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages. */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self-modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below. */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    ret = cpu_notify_migration_log(!!enable);
    return ret;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int ret;

    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
    return ret;
}

int cpu_physical_log_start(target_phys_addr_t start_addr,
                           ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_start) {
            int r = client->log_start(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}

int cpu_physical_log_stop(target_phys_addr_t start_addr,
                          ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_stop) {
            int r = client->log_stop(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

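/* Illustrative walk-through (not from the original source): the loop
   above widens the mask until a single naturally aligned power-of-two
   region covers both the recorded range and the new page.  For example,
   with an existing 2 MB region at 0x00200000 (mask ~0x1fffff) and a new
   large page at 0x00600000, the XOR differs in bit 22, so the mask
   grows through ~0x3fffff to ~0x7fffff; the recorded range becomes the
   8 MB region at 0x00000000, which covers both addresses. */
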
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page. */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM. */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region. This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address. */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

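/* Illustrative sketch (not part of qemu): the softmmu load/store fast
   path recomputes the same index as tlb_set_page() and hits when the
   stored tag matches.  In outline -- a hypothetical helper, heavily
   simplified from the real softmmu templates, ignoring access size and
   the TLB_MMIO/TLB_NOTDIRTY flag bits: */
#if 0
static inline void *tlb_fast_host_ptr(CPUState *env, int mmu_idx,
                                      target_ulong vaddr)
{
    unsigned int index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *te = &env->tlb_table[mmu_idx][index];

    if (te->addr_read == (vaddr & TARGET_PAGE_MASK)) {
        /* addend was set to host_base - vaddr, so adding it back
           yields the host address backing the guest page */
        return (void *)(unsigned long)(vaddr + te->addend);
    }
    return NULL; /* slow path: refill via tlb_set_page() */
}
#endif
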
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                         (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

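/* Hypothetical usage sketch (not part of qemu): walk_memory_regions()
   takes arbitrary callbacks, not just dump_region(); for instance, a
   callback that counts executable guest pages.  Returning non-zero
   from the callback aborts the walk early: */
#if 0
static int count_exec(void *priv, abi_ulong start, abi_ulong end,
                      unsigned long prot)
{
    if (prot & PAGE_EXEC) {
        *(abi_ulong *)priv += (end - start) >> TARGET_PAGE_BITS;
    }
    return 0;
}

static abi_ulong example_count_exec_pages(void)
{
    abi_ulong n = 0;

    walk_memory_regions(&n, count_exec);
    return n;
}
#endif
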
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE. The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler. However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

bellard6a00d602005-11-21 23:25:50 +00002618static inline void tlb_set_dirty(CPUState *env,
2619 unsigned long addr, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002620{
2621}
bellard9fa3e852004-01-04 18:06:42 +00002622#endif /* defined(CONFIG_USER_ONLY) */
2623
pbrooke2eef172008-06-08 01:09:01 +00002624#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002625
Paul Brookc04b2b72010-03-01 03:31:14 +00002626#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2627typedef struct subpage_t {
2628 target_phys_addr_t base;
Richard Hendersonf6405242010-04-22 16:47:31 -07002629 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2630 ram_addr_t region_offset[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002631} subpage_t;
2632
Anthony Liguoric227f092009-10-01 16:12:16 -05002633static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2634 ram_addr_t memory, ram_addr_t region_offset);
Richard Hendersonf6405242010-04-22 16:47:31 -07002635static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2636 ram_addr_t orig_memory,
2637 ram_addr_t region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002638#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2639 need_subpage) \
2640 do { \
2641 if (addr > start_addr) \
2642 start_addr2 = 0; \
2643 else { \
2644 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2645 if (start_addr2 > 0) \
2646 need_subpage = 1; \
2647 } \
2648 \
blueswir149e9fba2007-05-30 17:25:06 +00002649 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
blueswir1db7b5422007-05-26 17:36:03 +00002650 end_addr2 = TARGET_PAGE_SIZE - 1; \
2651 else { \
2652 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2653 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2654 need_subpage = 1; \
2655 } \
2656 } while (0)
2657
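/* Worked example for CHECK_SUBPAGE (illustrative, assuming 4 KiB target
   pages): registering start_addr = 0x1080 with orig_size = 0xf80 enters
   the loop below at addr = 0x1080; since addr == start_addr, start_addr2
   becomes 0x080 (> 0, so need_subpage is set), and end_addr2 becomes
   (0x1080 + 0xf80 - 1) & ~TARGET_PAGE_MASK = 0xfff, i.e. the subpage
   covers [0x080, 0xfff] of that page. */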
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002658/* register physical memory.
2659 For RAM, 'size' must be a multiple of the target page size.
2660 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002661 io memory page. The address used when calling the IO function is
2662 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002663 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002664 before calculating this offset. This should not be a problem unless
2665 the low bits of start_addr and region_offset differ. */
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03002666void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
Anthony Liguoric227f092009-10-01 16:12:16 -05002667 ram_addr_t size,
2668 ram_addr_t phys_offset,
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03002669 ram_addr_t region_offset,
2670 bool log_dirty)
bellard33417e72003-08-10 21:47:01 +00002671{
Anthony Liguoric227f092009-10-01 16:12:16 -05002672 target_phys_addr_t addr, end_addr;
bellard92e873b2004-05-21 14:52:29 +00002673 PhysPageDesc *p;
bellard9d420372006-06-25 22:25:22 +00002674 CPUState *env;
Anthony Liguoric227f092009-10-01 16:12:16 -05002675 ram_addr_t orig_size = size;
Richard Hendersonf6405242010-04-22 16:47:31 -07002676 subpage_t *subpage;
bellard33417e72003-08-10 21:47:01 +00002677
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002678 assert(size);
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03002679 cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002680
pbrook67c4d232009-02-23 13:16:07 +00002681 if (phys_offset == IO_MEM_UNASSIGNED) {
2682 region_offset = start_addr;
2683 }
pbrook8da3ff12008-12-01 18:59:50 +00002684 region_offset &= TARGET_PAGE_MASK;
bellard5fd386f2004-05-23 21:11:22 +00002685 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
Anthony Liguoric227f092009-10-01 16:12:16 -05002686 end_addr = start_addr + (target_phys_addr_t)size;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002687
2688 addr = start_addr;
2689 do {
blueswir1db7b5422007-05-26 17:36:03 +00002690 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2691 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
Anthony Liguoric227f092009-10-01 16:12:16 -05002692 ram_addr_t orig_memory = p->phys_offset;
2693 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002694 int need_subpage = 0;
2695
2696 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2697 need_subpage);
Richard Hendersonf6405242010-04-22 16:47:31 -07002698 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002699 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2700 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002701 &p->phys_offset, orig_memory,
2702 p->region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00002703 } else {
2704 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2705 >> IO_MEM_SHIFT];
2706 }
pbrook8da3ff12008-12-01 18:59:50 +00002707 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2708 region_offset);
2709 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002710 } else {
2711 p->phys_offset = phys_offset;
2712 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2713 (phys_offset & IO_MEM_ROMD))
2714 phys_offset += TARGET_PAGE_SIZE;
2715 }
2716 } else {
2717 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2718 p->phys_offset = phys_offset;
pbrook8da3ff12008-12-01 18:59:50 +00002719 p->region_offset = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00002720 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
pbrook8da3ff12008-12-01 18:59:50 +00002721 (phys_offset & IO_MEM_ROMD)) {
blueswir1db7b5422007-05-26 17:36:03 +00002722 phys_offset += TARGET_PAGE_SIZE;
pbrook0e8f0962008-12-02 09:02:15 +00002723 } else {
Anthony Liguoric227f092009-10-01 16:12:16 -05002724 target_phys_addr_t start_addr2, end_addr2;
blueswir1db7b5422007-05-26 17:36:03 +00002725 int need_subpage = 0;
2726
2727 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2728 end_addr2, need_subpage);
2729
Richard Hendersonf6405242010-04-22 16:47:31 -07002730 if (need_subpage) {
blueswir1db7b5422007-05-26 17:36:03 +00002731 subpage = subpage_init((addr & TARGET_PAGE_MASK),
pbrook8da3ff12008-12-01 18:59:50 +00002732 &p->phys_offset, IO_MEM_UNASSIGNED,
pbrook67c4d232009-02-23 13:16:07 +00002733 addr & TARGET_PAGE_MASK);
blueswir1db7b5422007-05-26 17:36:03 +00002734 subpage_register(subpage, start_addr2, end_addr2,
pbrook8da3ff12008-12-01 18:59:50 +00002735 phys_offset, region_offset);
2736 p->region_offset = 0;
blueswir1db7b5422007-05-26 17:36:03 +00002737 }
2738 }
2739 }
pbrook8da3ff12008-12-01 18:59:50 +00002740 region_offset += TARGET_PAGE_SIZE;
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002741 addr += TARGET_PAGE_SIZE;
2742 } while (addr != end_addr);
ths3b46e622007-09-17 08:09:54 +00002743
bellard9d420372006-06-25 22:25:22 +00002744 /* since each CPU stores ram addresses in its TLB cache, we must
2745 reset the modified entries */
2746 /* XXX: slow ! */
2747 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2748 tlb_flush(env, 1);
2749 }
bellard33417e72003-08-10 21:47:01 +00002750}
2751
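/* Hedged usage sketch (names, addresses and sizes are hypothetical, not
   taken from this file): a board model typically pairs a RAM allocation
   with a physical mapping, e.g.

       ram_addr_t off = qemu_ram_alloc(NULL, "board.ram", ram_size);
       cpu_register_physical_memory(0x00000000, ram_size,
                                    off | IO_MEM_RAM);

   where cpu_register_physical_memory() is the header wrapper that calls
   the _log variant above with log_dirty = false. */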
bellardba863452006-09-24 18:41:10 +00002752/* XXX: temporary until new memory mapping API */
Anthony Liguoric227f092009-10-01 16:12:16 -05002753ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
bellardba863452006-09-24 18:41:10 +00002754{
2755 PhysPageDesc *p;
2756
2757 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2758 if (!p)
2759 return IO_MEM_UNASSIGNED;
2760 return p->phys_offset;
2761}
2762
Anthony Liguoric227f092009-10-01 16:12:16 -05002763void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002764{
2765 if (kvm_enabled())
2766 kvm_coalesce_mmio_region(addr, size);
2767}
2768
Anthony Liguoric227f092009-10-01 16:12:16 -05002769void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002770{
2771 if (kvm_enabled())
2772 kvm_uncoalesce_mmio_region(addr, size);
2773}
2774
Sheng Yang62a27442010-01-26 19:21:16 +08002775void qemu_flush_coalesced_mmio_buffer(void)
2776{
2777 if (kvm_enabled())
2778 kvm_flush_coalesced_mmio_buffer();
2779}
2780
Marcelo Tosattic9027602010-03-01 20:25:08 -03002781#if defined(__linux__) && !defined(TARGET_S390X)
2782
2783#include <sys/vfs.h>
2784
2785#define HUGETLBFS_MAGIC 0x958458f6
2786
2787static long gethugepagesize(const char *path)
2788{
2789 struct statfs fs;
2790 int ret;
2791
2792 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002793 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002794 } while (ret != 0 && errno == EINTR);
2795
2796 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002797 perror(path);
2798 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002799 }
2800
2801 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002802 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002803
2804 return fs.f_bsize;
2805}
2806
Alex Williamson04b16652010-07-02 11:13:17 -06002807static void *file_ram_alloc(RAMBlock *block,
2808 ram_addr_t memory,
2809 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002810{
2811 char *filename;
2812 void *area;
2813 int fd;
2814#ifdef MAP_POPULATE
2815 int flags;
2816#endif
2817 unsigned long hpagesize;
2818
2819 hpagesize = gethugepagesize(path);
2820 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002821 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002822 }
2823
2824 if (memory < hpagesize) {
2825 return NULL;
2826 }
2827
2828 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2829 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2830 return NULL;
2831 }
2832
2833 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002834 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002835 }
2836
2837 fd = mkstemp(filename);
2838 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002839 perror("unable to create backing store for hugepages");
2840 free(filename);
2841 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002842 }
2843 unlink(filename);
2844 free(filename);
2845
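    /* Round the request up to a whole number of huge pages: e.g. with
       2 MiB hugepages, a 5 MiB request becomes 6 MiB (illustrative
       values, assuming the usual power-of-two huge page size). */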
2846 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2847
2848 /*
2849 * ftruncate is not supported by hugetlbfs in older
2850 * hosts, so don't bother bailing out on errors.
2851 * If anything goes wrong with it under other filesystems,
2852 * mmap will fail.
2853 */
2854 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002855 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002856
2857#ifdef MAP_POPULATE
2858 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
2859 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2860 * to sidestep this quirk.
2861 */
2862 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2863 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2864#else
2865 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2866#endif
2867 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002868 perror("file_ram_alloc: can't mmap RAM pages");
2869 close(fd);
2870 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002871 }
Alex Williamson04b16652010-07-02 11:13:17 -06002872 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002873 return area;
2874}
2875#endif
2876
Alex Williamsond17b5282010-06-25 11:08:38 -06002877static ram_addr_t find_ram_offset(ram_addr_t size)
2878{
Alex Williamson04b16652010-07-02 11:13:17 -06002879 RAMBlock *block, *next_block;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002880 ram_addr_t offset = 0, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002881
2882 if (QLIST_EMPTY(&ram_list.blocks))
2883 return 0;
2884
2885 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002886 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002887
2888 end = block->offset + block->length;
2889
2890 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2891 if (next_block->offset >= end) {
2892 next = MIN(next, next_block->offset);
2893 }
2894 }
2895 if (next - end >= size && next - end < mingap) {
2896 offset = end;
2897 mingap = next - end;
2898 }
2899 }
2900 return offset;
2901}
2902
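/* Worked example (illustrative): with blocks covering [0, 1M), [2M, 3M)
   and [8M, 9M), a request for 512K fits both the 1M hole at offset 1M
   and the 5M hole at offset 3M; the loop above returns 1M, the start of
   the smallest gap that fits, keeping RAM blocks tightly packed. */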
2903static ram_addr_t last_ram_offset(void)
2904{
Alex Williamsond17b5282010-06-25 11:08:38 -06002905 RAMBlock *block;
2906 ram_addr_t last = 0;
2907
2908 QLIST_FOREACH(block, &ram_list.blocks, next)
2909 last = MAX(last, block->offset + block->length);
2910
2911 return last;
2912}
2913
Cam Macdonell84b89d72010-07-26 18:10:57 -06002914ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002915 ram_addr_t size, void *host)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002916{
2917 RAMBlock *new_block, *block;
2918
2919 size = TARGET_PAGE_ALIGN(size);
Anthony Liguori7267c092011-08-20 22:09:37 -05002920 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002921
2922 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2923 char *id = dev->parent_bus->info->get_dev_path(dev);
2924 if (id) {
2925 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002926 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002927 }
2928 }
2929 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2930
2931 QLIST_FOREACH(block, &ram_list.blocks, next) {
2932 if (!strcmp(block->idstr, new_block->idstr)) {
2933 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2934 new_block->idstr);
2935 abort();
2936 }
2937 }
2938
Jun Nakajima432d2682010-08-31 16:41:25 +01002939 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002940 if (host) {
2941 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002942 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002943 } else {
2944 if (mem_path) {
2945#if defined (__linux__) && !defined(TARGET_S390X)
2946 new_block->host = file_ram_alloc(new_block, size, mem_path);
2947 if (!new_block->host) {
2948 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002949 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002950 }
2951#else
2952 fprintf(stderr, "-mem-path option unsupported\n");
2953 exit(1);
2954#endif
2955 } else {
2956#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002957 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2958 a system-defined value, which is at least 256GB. Larger systems
2959 have larger values. We put the guest between the end of the data
2960 segment (system break) and this value. We use 32GB as a base to
2961 have enough room for the system break to grow. */
2962 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002963 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002964 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002965 if (new_block->host == MAP_FAILED) {
2966 fprintf(stderr, "Allocating RAM failed\n");
2967 abort();
2968 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002969#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002970 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002971 xen_ram_alloc(new_block->offset, size);
2972 } else {
2973 new_block->host = qemu_vmalloc(size);
2974 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002975#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002976 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002977 }
2978 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002979 new_block->length = size;
2980
2981 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2982
Anthony Liguori7267c092011-08-20 22:09:37 -05002983 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002984 last_ram_offset() >> TARGET_PAGE_BITS);
2985 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2986 0xff, size >> TARGET_PAGE_BITS);
2987
2988 if (kvm_enabled())
2989 kvm_setup_guest_memory(new_block->host, size);
2990
2991 return new_block->offset;
2992}
2993
Alex Williamson1724f042010-06-25 11:09:35 -06002994ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
pbrook94a6b542009-04-11 17:15:54 +00002995{
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002996 return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
pbrook94a6b542009-04-11 17:15:54 +00002997}
bellarde9a1ab12007-02-08 23:08:38 +00002998
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002999void qemu_ram_free_from_ptr(ram_addr_t addr)
3000{
3001 RAMBlock *block;
3002
3003 QLIST_FOREACH(block, &ram_list.blocks, next) {
3004 if (addr == block->offset) {
3005 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05003006 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06003007 return;
3008 }
3009 }
3010}
3011
Anthony Liguoric227f092009-10-01 16:12:16 -05003012void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00003013{
Alex Williamson04b16652010-07-02 11:13:17 -06003014 RAMBlock *block;
3015
3016 QLIST_FOREACH(block, &ram_list.blocks, next) {
3017 if (addr == block->offset) {
3018 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01003019 if (block->flags & RAM_PREALLOC_MASK) {
3020 ;
3021 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06003022#if defined (__linux__) && !defined(TARGET_S390X)
3023 if (block->fd) {
3024 munmap(block->host, block->length);
3025 close(block->fd);
3026 } else {
3027 qemu_vfree(block->host);
3028 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003029#else
3030 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06003031#endif
3032 } else {
3033#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3034 munmap(block->host, block->length);
3035#else
Jan Kiszka868bb332011-06-21 22:59:09 +02003036 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003037 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01003038 } else {
3039 qemu_vfree(block->host);
3040 }
Alex Williamson04b16652010-07-02 11:13:17 -06003041#endif
3042 }
Anthony Liguori7267c092011-08-20 22:09:37 -05003043 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06003044 return;
3045 }
3046 }
3047
bellarde9a1ab12007-02-08 23:08:38 +00003048}
3049
Huang Yingcd19cfa2011-03-02 08:56:19 +01003050#ifndef _WIN32
3051void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3052{
3053 RAMBlock *block;
3054 ram_addr_t offset;
3055 int flags;
3056 void *area, *vaddr;
3057
3058 QLIST_FOREACH(block, &ram_list.blocks, next) {
3059 offset = addr - block->offset;
3060 if (offset < block->length) {
3061 vaddr = block->host + offset;
3062 if (block->flags & RAM_PREALLOC_MASK) {
3063 ;
3064 } else {
3065 flags = MAP_FIXED;
3066 munmap(vaddr, length);
3067 if (mem_path) {
3068#if defined(__linux__) && !defined(TARGET_S390X)
3069 if (block->fd) {
3070#ifdef MAP_POPULATE
3071 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3072 MAP_PRIVATE;
3073#else
3074 flags |= MAP_PRIVATE;
3075#endif
3076 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3077 flags, block->fd, offset);
3078 } else {
3079 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3080 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3081 flags, -1, 0);
3082 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01003083#else
3084 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01003085#endif
3086 } else {
3087#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3088 flags |= MAP_SHARED | MAP_ANONYMOUS;
3089 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3090 flags, -1, 0);
3091#else
3092 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3093 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3094 flags, -1, 0);
3095#endif
3096 }
3097 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003098 fprintf(stderr, "Could not remap addr: "
3099 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01003100 length, addr);
3101 exit(1);
3102 }
3103 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3104 }
3105 return;
3106 }
3107 }
3108}
3109#endif /* !_WIN32 */
3110
pbrookdc828ca2009-04-09 22:21:07 +00003111/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00003112 With the exception of the softmmu code in this file, this should
3113 only be used for local memory (e.g. video RAM) that the device owns
3114 and knows it isn't going to access beyond the end of the block.
3115
3116 It should not be used for general purpose DMA.
3117 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3118 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003119void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00003120{
pbrook94a6b542009-04-11 17:15:54 +00003121 RAMBlock *block;
3122
Alex Williamsonf471a172010-06-11 11:11:42 -06003123 QLIST_FOREACH(block, &ram_list.blocks, next) {
3124 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05003125 /* Move this entry to the start of the list. */
3126 if (block != QLIST_FIRST(&ram_list.blocks)) {
3127 QLIST_REMOVE(block, next);
3128 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3129 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003130 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003131 /* We need to check if the requested address is in the RAM
3132 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003133 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003134 */
3135 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003136 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003137 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003138 block->host =
3139 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003140 }
3141 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003142 return block->host + (addr - block->offset);
3143 }
pbrook94a6b542009-04-11 17:15:54 +00003144 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003145
3146 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3147 abort();
3148
3149 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00003150}
3151
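/* Hedged sketch of the general-purpose DMA path recommended in the
   comment above (the device address and buffer are hypothetical):

       uint8_t buf[64];
       cpu_physical_memory_rw(dev_addr, buf, sizeof(buf), 0);

   reads 64 bytes of guest-physical memory into buf; passing 1 as the
   last argument writes instead. */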
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003152/* Return a host pointer to ram allocated with qemu_ram_alloc.
3153 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3154 */
3155void *qemu_safe_ram_ptr(ram_addr_t addr)
3156{
3157 RAMBlock *block;
3158
3159 QLIST_FOREACH(block, &ram_list.blocks, next) {
3160 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02003161 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003162 /* We need to check if the requested address is in the RAM
3163 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003164 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01003165 */
3166 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003167 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01003168 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003169 block->host =
3170 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01003171 }
3172 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003173 return block->host + (addr - block->offset);
3174 }
3175 }
3176
3177 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3178 abort();
3179
3180 return NULL;
3181}
3182
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003183/* Return a host pointer to guest RAM. Similar to qemu_get_ram_ptr
 3184 * but takes a size argument. */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003185void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003186{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003187 if (*size == 0) {
3188 return NULL;
3189 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003190 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003191 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02003192 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003193 RAMBlock *block;
3194
3195 QLIST_FOREACH(block, &ram_list.blocks, next) {
3196 if (addr - block->offset < block->length) {
3197 if (addr - block->offset + *size > block->length)
3198 *size = block->length - addr + block->offset;
3199 return block->host + (addr - block->offset);
3200 }
3201 }
3202
3203 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3204 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003205 }
3206}
3207
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003208void qemu_put_ram_ptr(void *addr)
3209{
3210 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003211}
3212
Marcelo Tosattie8902612010-10-11 15:31:19 -03003213int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00003214{
pbrook94a6b542009-04-11 17:15:54 +00003215 RAMBlock *block;
3216 uint8_t *host = ptr;
3217
Jan Kiszka868bb332011-06-21 22:59:09 +02003218 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003219 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01003220 return 0;
3221 }
3222
Alex Williamsonf471a172010-06-11 11:11:42 -06003223 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01003224 /* This case can happen when the block is not mapped. */
3225 if (block->host == NULL) {
3226 continue;
3227 }
Alex Williamsonf471a172010-06-11 11:11:42 -06003228 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003229 *ram_addr = block->offset + (host - block->host);
3230 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06003231 }
pbrook94a6b542009-04-11 17:15:54 +00003232 }
Jun Nakajima432d2682010-08-31 16:41:25 +01003233
Marcelo Tosattie8902612010-10-11 15:31:19 -03003234 return -1;
3235}
Alex Williamsonf471a172010-06-11 11:11:42 -06003236
Marcelo Tosattie8902612010-10-11 15:31:19 -03003237/* Some of the softmmu routines need to translate from a host pointer
3238 (typically a TLB entry) back to a ram offset. */
3239ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3240{
3241 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003242
Marcelo Tosattie8902612010-10-11 15:31:19 -03003243 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3244 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3245 abort();
3246 }
3247 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003248}
3249
Anthony Liguoric227f092009-10-01 16:12:16 -05003250static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
bellard33417e72003-08-10 21:47:01 +00003251{
pbrook67d3b952006-12-18 05:03:52 +00003252#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003253 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00003254#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003255#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003256 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
blueswir1e18231a2008-10-06 18:46:28 +00003257#endif
3258 return 0;
3259}
3260
Anthony Liguoric227f092009-10-01 16:12:16 -05003261static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00003262{
3263#ifdef DEBUG_UNASSIGNED
3264 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3265#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003266#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003267 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
blueswir1e18231a2008-10-06 18:46:28 +00003268#endif
3269 return 0;
3270}
3271
Anthony Liguoric227f092009-10-01 16:12:16 -05003272static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
blueswir1e18231a2008-10-06 18:46:28 +00003273{
3274#ifdef DEBUG_UNASSIGNED
3275 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3276#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003277#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003278 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00003279#endif
bellard33417e72003-08-10 21:47:01 +00003280 return 0;
3281}
3282
Anthony Liguoric227f092009-10-01 16:12:16 -05003283static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
bellard33417e72003-08-10 21:47:01 +00003284{
pbrook67d3b952006-12-18 05:03:52 +00003285#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00003286 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
pbrook67d3b952006-12-18 05:03:52 +00003287#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003288#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003289 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
blueswir1e18231a2008-10-06 18:46:28 +00003290#endif
3291}
3292
Anthony Liguoric227f092009-10-01 16:12:16 -05003293static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00003294{
3295#ifdef DEBUG_UNASSIGNED
3296 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3297#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003298#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003299 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
blueswir1e18231a2008-10-06 18:46:28 +00003300#endif
3301}
3302
Anthony Liguoric227f092009-10-01 16:12:16 -05003303static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
blueswir1e18231a2008-10-06 18:46:28 +00003304{
3305#ifdef DEBUG_UNASSIGNED
3306 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3307#endif
Richard Henderson5b450402011-04-18 16:13:12 -07003308#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Blue Swirlb14ef7c2011-07-03 08:53:46 +00003309 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
blueswir1b4f0a312007-05-06 17:59:24 +00003310#endif
bellard33417e72003-08-10 21:47:01 +00003311}
3312
Blue Swirld60efc62009-08-25 18:29:31 +00003313static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
bellard33417e72003-08-10 21:47:01 +00003314 unassigned_mem_readb,
blueswir1e18231a2008-10-06 18:46:28 +00003315 unassigned_mem_readw,
3316 unassigned_mem_readl,
bellard33417e72003-08-10 21:47:01 +00003317};
3318
Blue Swirld60efc62009-08-25 18:29:31 +00003319static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
bellard33417e72003-08-10 21:47:01 +00003320 unassigned_mem_writeb,
blueswir1e18231a2008-10-06 18:46:28 +00003321 unassigned_mem_writew,
3322 unassigned_mem_writel,
bellard33417e72003-08-10 21:47:01 +00003323};
3324
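/* The notdirty handlers below back RAM pages whose dirty bits are not all
   set: each write invalidates any translated code derived from the page
   (when the code-dirty bit is clear), performs the store, and updates the
   dirty flags; once the page is fully dirty (0xff), tlb_set_dirty() drops
   this slow path and the page reverts to plain RAM accesses. */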
Anthony Liguoric227f092009-10-01 16:12:16 -05003325static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00003326 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00003327{
bellard3a7d9292005-08-21 09:26:42 +00003328 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003329 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003330 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3331#if !defined(CONFIG_USER_ONLY)
3332 tb_invalidate_phys_page_fast(ram_addr, 1);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003333 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003334#endif
3335 }
pbrook5579c7f2009-04-11 14:47:08 +00003336 stb_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00003337 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003338 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003339 /* we remove the notdirty callback only if the code has been
3340 flushed */
3341 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003342 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003343}
3344
Anthony Liguoric227f092009-10-01 16:12:16 -05003345static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00003346 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00003347{
bellard3a7d9292005-08-21 09:26:42 +00003348 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003349 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003350 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3351#if !defined(CONFIG_USER_ONLY)
3352 tb_invalidate_phys_page_fast(ram_addr, 2);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003353 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003354#endif
3355 }
pbrook5579c7f2009-04-11 14:47:08 +00003356 stw_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00003357 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003358 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003359 /* we remove the notdirty callback only if the code has been
3360 flushed */
3361 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003362 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003363}
3364
Anthony Liguoric227f092009-10-01 16:12:16 -05003365static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
pbrook0f459d12008-06-09 00:20:13 +00003366 uint32_t val)
bellard1ccde1c2004-02-06 19:46:14 +00003367{
bellard3a7d9292005-08-21 09:26:42 +00003368 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003369 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003370 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3371#if !defined(CONFIG_USER_ONLY)
3372 tb_invalidate_phys_page_fast(ram_addr, 4);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003373 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00003374#endif
3375 }
pbrook5579c7f2009-04-11 14:47:08 +00003376 stl_p(qemu_get_ram_ptr(ram_addr), val);
bellardf23db162005-08-21 19:12:28 +00003377 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003378 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00003379 /* we remove the notdirty callback only if the code has been
3380 flushed */
3381 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00003382 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00003383}
3384
Blue Swirld60efc62009-08-25 18:29:31 +00003385static CPUReadMemoryFunc * const error_mem_read[3] = {
bellard3a7d9292005-08-21 09:26:42 +00003386 NULL, /* never used */
3387 NULL, /* never used */
3388 NULL, /* never used */
3389};
3390
Blue Swirld60efc62009-08-25 18:29:31 +00003391static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
bellard1ccde1c2004-02-06 19:46:14 +00003392 notdirty_mem_writeb,
3393 notdirty_mem_writew,
3394 notdirty_mem_writel,
3395};
3396
pbrook0f459d12008-06-09 00:20:13 +00003397/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00003398static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00003399{
3400 CPUState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00003401 target_ulong pc, cs_base;
3402 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00003403 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00003404 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00003405 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00003406
aliguori06d55cc2008-11-18 20:24:06 +00003407 if (env->watchpoint_hit) {
3408 /* We re-entered the check after replacing the TB. Now raise
3409 * the debug interrupt so that is will trigger after the
3410 * current instruction. */
3411 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3412 return;
3413 }
pbrook2e70f6e2008-06-29 01:03:05 +00003414 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003415 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00003416 if ((vaddr == (wp->vaddr & len_mask) ||
3417 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00003418 wp->flags |= BP_WATCHPOINT_HIT;
3419 if (!env->watchpoint_hit) {
3420 env->watchpoint_hit = wp;
3421 tb = tb_find_pc(env->mem_io_pc);
3422 if (!tb) {
3423 cpu_abort(env, "check_watchpoint: could not find TB for "
3424 "pc=%p", (void *)env->mem_io_pc);
3425 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00003426 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00003427 tb_phys_invalidate(tb, -1);
3428 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3429 env->exception_index = EXCP_DEBUG;
3430 } else {
3431 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3432 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3433 }
3434 cpu_resume_from_signal(env, NULL);
aliguori06d55cc2008-11-18 20:24:06 +00003435 }
aliguori6e140f22008-11-18 20:37:55 +00003436 } else {
3437 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00003438 }
3439 }
3440}
3441
pbrook6658ffb2007-03-16 23:58:11 +00003442/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3443 so these check for a hit then pass through to the normal out-of-line
3444 phys routines. */
Anthony Liguoric227f092009-10-01 16:12:16 -05003445static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003446{
aliguorib4051332008-11-18 20:14:20 +00003447 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003448 return ldub_phys(addr);
3449}
3450
Anthony Liguoric227f092009-10-01 16:12:16 -05003451static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003452{
aliguorib4051332008-11-18 20:14:20 +00003453 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003454 return lduw_phys(addr);
3455}
3456
Anthony Liguoric227f092009-10-01 16:12:16 -05003457static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
pbrook6658ffb2007-03-16 23:58:11 +00003458{
aliguorib4051332008-11-18 20:14:20 +00003459 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
pbrook6658ffb2007-03-16 23:58:11 +00003460 return ldl_phys(addr);
3461}
3462
Anthony Liguoric227f092009-10-01 16:12:16 -05003463static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003464 uint32_t val)
3465{
aliguorib4051332008-11-18 20:14:20 +00003466 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003467 stb_phys(addr, val);
3468}
3469
Anthony Liguoric227f092009-10-01 16:12:16 -05003470static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003471 uint32_t val)
3472{
aliguorib4051332008-11-18 20:14:20 +00003473 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003474 stw_phys(addr, val);
3475}
3476
Anthony Liguoric227f092009-10-01 16:12:16 -05003477static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
pbrook6658ffb2007-03-16 23:58:11 +00003478 uint32_t val)
3479{
aliguorib4051332008-11-18 20:14:20 +00003480 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
pbrook6658ffb2007-03-16 23:58:11 +00003481 stl_phys(addr, val);
3482}
3483
Blue Swirld60efc62009-08-25 18:29:31 +00003484static CPUReadMemoryFunc * const watch_mem_read[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00003485 watch_mem_readb,
3486 watch_mem_readw,
3487 watch_mem_readl,
3488};
3489
Blue Swirld60efc62009-08-25 18:29:31 +00003490static CPUWriteMemoryFunc * const watch_mem_write[3] = {
pbrook6658ffb2007-03-16 23:58:11 +00003491 watch_mem_writeb,
3492 watch_mem_writew,
3493 watch_mem_writel,
3494};
pbrook6658ffb2007-03-16 23:58:11 +00003495
Richard Hendersonf6405242010-04-22 16:47:31 -07003496static inline uint32_t subpage_readlen (subpage_t *mmio,
3497 target_phys_addr_t addr,
3498 unsigned int len)
blueswir1db7b5422007-05-26 17:36:03 +00003499{
Richard Hendersonf6405242010-04-22 16:47:31 -07003500 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003501#if defined(DEBUG_SUBPAGE)
3502 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3503 mmio, len, addr, idx);
3504#endif
blueswir1db7b5422007-05-26 17:36:03 +00003505
Richard Hendersonf6405242010-04-22 16:47:31 -07003506 addr += mmio->region_offset[idx];
3507 idx = mmio->sub_io_index[idx];
3508 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
blueswir1db7b5422007-05-26 17:36:03 +00003509}
3510
Anthony Liguoric227f092009-10-01 16:12:16 -05003511static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
Richard Hendersonf6405242010-04-22 16:47:31 -07003512 uint32_t value, unsigned int len)
blueswir1db7b5422007-05-26 17:36:03 +00003513{
Richard Hendersonf6405242010-04-22 16:47:31 -07003514 unsigned int idx = SUBPAGE_IDX(addr);
blueswir1db7b5422007-05-26 17:36:03 +00003515#if defined(DEBUG_SUBPAGE)
Richard Hendersonf6405242010-04-22 16:47:31 -07003516 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3517 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003518#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003519
3520 addr += mmio->region_offset[idx];
3521 idx = mmio->sub_io_index[idx];
3522 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00003523}
3524
Anthony Liguoric227f092009-10-01 16:12:16 -05003525static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003526{
blueswir1db7b5422007-05-26 17:36:03 +00003527 return subpage_readlen(opaque, addr, 0);
3528}
3529
Anthony Liguoric227f092009-10-01 16:12:16 -05003530static void subpage_writeb (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00003531 uint32_t value)
3532{
blueswir1db7b5422007-05-26 17:36:03 +00003533 subpage_writelen(opaque, addr, value, 0);
3534}
3535
Anthony Liguoric227f092009-10-01 16:12:16 -05003536static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003537{
blueswir1db7b5422007-05-26 17:36:03 +00003538 return subpage_readlen(opaque, addr, 1);
3539}
3540
Anthony Liguoric227f092009-10-01 16:12:16 -05003541static void subpage_writew (void *opaque, target_phys_addr_t addr,
blueswir1db7b5422007-05-26 17:36:03 +00003542 uint32_t value)
3543{
blueswir1db7b5422007-05-26 17:36:03 +00003544 subpage_writelen(opaque, addr, value, 1);
3545}
3546
Anthony Liguoric227f092009-10-01 16:12:16 -05003547static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
blueswir1db7b5422007-05-26 17:36:03 +00003548{
blueswir1db7b5422007-05-26 17:36:03 +00003549 return subpage_readlen(opaque, addr, 2);
3550}
3551
Richard Hendersonf6405242010-04-22 16:47:31 -07003552static void subpage_writel (void *opaque, target_phys_addr_t addr,
3553 uint32_t value)
blueswir1db7b5422007-05-26 17:36:03 +00003554{
blueswir1db7b5422007-05-26 17:36:03 +00003555 subpage_writelen(opaque, addr, value, 2);
3556}
3557
Blue Swirld60efc62009-08-25 18:29:31 +00003558static CPUReadMemoryFunc * const subpage_read[] = {
blueswir1db7b5422007-05-26 17:36:03 +00003559 &subpage_readb,
3560 &subpage_readw,
3561 &subpage_readl,
3562};
3563
Blue Swirld60efc62009-08-25 18:29:31 +00003564static CPUWriteMemoryFunc * const subpage_write[] = {
blueswir1db7b5422007-05-26 17:36:03 +00003565 &subpage_writeb,
3566 &subpage_writew,
3567 &subpage_writel,
3568};
3569
Anthony Liguoric227f092009-10-01 16:12:16 -05003570static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3571 ram_addr_t memory, ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003572{
3573 int idx, eidx;
3574
3575 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3576 return -1;
3577 idx = SUBPAGE_IDX(start);
3578 eidx = SUBPAGE_IDX(end);
3579#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003580 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003581 mmio, start, end, idx, eidx, memory);
3582#endif
Gleb Natapov95c318f2010-07-29 10:41:45 +03003583 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
3584 memory = IO_MEM_UNASSIGNED;
Richard Hendersonf6405242010-04-22 16:47:31 -07003585 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
blueswir1db7b5422007-05-26 17:36:03 +00003586 for (; idx <= eidx; idx++) {
Richard Hendersonf6405242010-04-22 16:47:31 -07003587 mmio->sub_io_index[idx] = memory;
3588 mmio->region_offset[idx] = region_offset;
blueswir1db7b5422007-05-26 17:36:03 +00003589 }
3590
3591 return 0;
3592}
3593
Richard Hendersonf6405242010-04-22 16:47:31 -07003594static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3595 ram_addr_t orig_memory,
3596 ram_addr_t region_offset)
blueswir1db7b5422007-05-26 17:36:03 +00003597{
Anthony Liguoric227f092009-10-01 16:12:16 -05003598 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003599 int subpage_memory;
3600
Anthony Liguori7267c092011-08-20 22:09:37 -05003601 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003602
3603 mmio->base = base;
Alexander Graf2507c122010-12-08 12:05:37 +01003604 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3605 DEVICE_NATIVE_ENDIAN);
blueswir1db7b5422007-05-26 17:36:03 +00003606#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003607 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3608 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
blueswir1db7b5422007-05-26 17:36:03 +00003609#endif
aliguori1eec6142009-02-05 22:06:18 +00003610 *phys = subpage_memory | IO_MEM_SUBPAGE;
Richard Hendersonf6405242010-04-22 16:47:31 -07003611 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
blueswir1db7b5422007-05-26 17:36:03 +00003612
3613 return mmio;
3614}
3615
aliguori88715652009-02-11 15:20:58 +00003616static int get_free_io_mem_idx(void)
3617{
3618 int i;
3619
3620 for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
3621 if (!io_mem_used[i]) {
3622 io_mem_used[i] = 1;
3623 return i;
3624 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003625 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003626 return -1;
3627}
3628
Alexander Grafdd310532010-12-08 12:05:36 +01003629/*
3630 * Usually, devices operate in little endian mode. There are devices out
3631 * there that operate in big endian too. Each device gets byte swapped
3632 * mmio if plugged onto a CPU that does the other endianness.
3633 *
3634 * CPU Device swap?
3635 *
3636 * little little no
3637 * little big yes
3638 * big little yes
3639 * big big no
3640 */
3641
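/* Worked example (illustrative): a big-endian guest storing the 32-bit
   value 0x12345678 to a little-endian device goes through bswap32(), so
   the device callback receives 0x78563412; the matching read path swaps
   the value back, keeping data device-endian on the bus and guest-endian
   in registers. */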
3642typedef struct SwapEndianContainer {
3643 CPUReadMemoryFunc *read[3];
3644 CPUWriteMemoryFunc *write[3];
3645 void *opaque;
3646} SwapEndianContainer;
3647
3648static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3649{
3650 uint32_t val;
3651 SwapEndianContainer *c = opaque;
3652 val = c->read[0](c->opaque, addr);
3653 return val;
3654}
3655
3656static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3657{
3658 uint32_t val;
3659 SwapEndianContainer *c = opaque;
3660 val = bswap16(c->read[1](c->opaque, addr));
3661 return val;
3662}
3663
3664static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3665{
3666 uint32_t val;
3667 SwapEndianContainer *c = opaque;
3668 val = bswap32(c->read[2](c->opaque, addr));
3669 return val;
3670}
3671
3672static CPUReadMemoryFunc * const swapendian_readfn[3]={
3673 swapendian_mem_readb,
3674 swapendian_mem_readw,
3675 swapendian_mem_readl
3676};
3677
3678static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3679 uint32_t val)
3680{
3681 SwapEndianContainer *c = opaque;
3682 c->write[0](c->opaque, addr, val);
3683}
3684
3685static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3686 uint32_t val)
3687{
3688 SwapEndianContainer *c = opaque;
3689 c->write[1](c->opaque, addr, bswap16(val));
3690}
3691
3692static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3693 uint32_t val)
3694{
3695 SwapEndianContainer *c = opaque;
3696 c->write[2](c->opaque, addr, bswap32(val));
3697}
3698
3699static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3700 swapendian_mem_writeb,
3701 swapendian_mem_writew,
3702 swapendian_mem_writel
3703};
3704
3705static void swapendian_init(int io_index)
3706{
Anthony Liguori7267c092011-08-20 22:09:37 -05003707 SwapEndianContainer *c = g_malloc(sizeof(SwapEndianContainer));
Alexander Grafdd310532010-12-08 12:05:36 +01003708 int i;
3709
3710 /* Swap mmio for big endian targets */
3711 c->opaque = io_mem_opaque[io_index];
3712 for (i = 0; i < 3; i++) {
3713 c->read[i] = io_mem_read[io_index][i];
3714 c->write[i] = io_mem_write[io_index][i];
3715
3716 io_mem_read[io_index][i] = swapendian_readfn[i];
3717 io_mem_write[io_index][i] = swapendian_writefn[i];
3718 }
3719 io_mem_opaque[io_index] = c;
3720}
3721
3722static void swapendian_del(int io_index)
3723{
3724 if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
Anthony Liguori7267c092011-08-20 22:09:37 -05003725 g_free(io_mem_opaque[io_index]);
Alexander Grafdd310532010-12-08 12:05:36 +01003726 }
3727}
3728
bellard33417e72003-08-10 21:47:01 +00003729/* mem_read and mem_write are arrays of functions containing the
3730 function to access byte (index 0), word (index 1) and dword (index
Paul Brook0b4e6e32009-04-30 18:37:55 +01003731 2). Functions can be omitted with a NULL function pointer.
blueswir13ee89922008-01-02 19:45:26 +00003732 If io_index is non-zero, the corresponding io zone is
blueswir14254fab2008-01-01 16:57:19 +00003733 modified. If it is zero, a new io zone is allocated. The return
3734 value can be used with cpu_register_physical_memory(). (-1) is
3735 returned on error. */
Avi Kivity1eed09c2009-06-14 11:38:51 +03003736static int cpu_register_io_memory_fixed(int io_index,
Blue Swirld60efc62009-08-25 18:29:31 +00003737 CPUReadMemoryFunc * const *mem_read,
3738 CPUWriteMemoryFunc * const *mem_write,
Alexander Grafdd310532010-12-08 12:05:36 +01003739 void *opaque, enum device_endian endian)
bellard33417e72003-08-10 21:47:01 +00003740{
Richard Henderson3cab7212010-05-07 09:52:51 -07003741 int i;
3742
bellard33417e72003-08-10 21:47:01 +00003743 if (io_index <= 0) {
aliguori88715652009-02-11 15:20:58 +00003744 io_index = get_free_io_mem_idx();
3745 if (io_index == -1)
3746 return io_index;
bellard33417e72003-08-10 21:47:01 +00003747 } else {
Avi Kivity1eed09c2009-06-14 11:38:51 +03003748 io_index >>= IO_MEM_SHIFT;
bellard33417e72003-08-10 21:47:01 +00003749 if (io_index >= IO_MEM_NB_ENTRIES)
3750 return -1;
3751 }
bellardb5ff1b32005-11-26 10:38:39 +00003752
Richard Henderson3cab7212010-05-07 09:52:51 -07003753 for (i = 0; i < 3; ++i) {
3754 io_mem_read[io_index][i]
3755 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3756 }
3757 for (i = 0; i < 3; ++i) {
3758 io_mem_write[io_index][i]
3759 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3760 }
bellarda4193c82004-06-03 14:01:43 +00003761 io_mem_opaque[io_index] = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003762
Alexander Grafdd310532010-12-08 12:05:36 +01003763 switch (endian) {
3764 case DEVICE_BIG_ENDIAN:
3765#ifndef TARGET_WORDS_BIGENDIAN
3766 swapendian_init(io_index);
3767#endif
3768 break;
3769 case DEVICE_LITTLE_ENDIAN:
3770#ifdef TARGET_WORDS_BIGENDIAN
3771 swapendian_init(io_index);
3772#endif
3773 break;
3774 case DEVICE_NATIVE_ENDIAN:
3775 default:
3776 break;
3777 }
3778
Richard Hendersonf6405242010-04-22 16:47:31 -07003779 return (io_index << IO_MEM_SHIFT);
bellard33417e72003-08-10 21:47:01 +00003780}
bellard61382a52003-10-27 21:22:23 +00003781
Blue Swirld60efc62009-08-25 18:29:31 +00003782int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3783 CPUWriteMemoryFunc * const *mem_write,
Alexander Grafdd310532010-12-08 12:05:36 +01003784 void *opaque, enum device_endian endian)
Avi Kivity1eed09c2009-06-14 11:38:51 +03003785{
Alexander Graf2507c122010-12-08 12:05:37 +01003786 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
Avi Kivity1eed09c2009-06-14 11:38:51 +03003787}
3788
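/* Hedged usage sketch (mydev_* and s are hypothetical device-model names,
   not defined in this file): a device registers its callbacks and maps
   the returned token, e.g.

       static CPUReadMemoryFunc * const mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc * const mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(mydev_read, mydev_write, s,
                                       DEVICE_LITTLE_ENDIAN);
       cpu_register_physical_memory(base, 0x1000, io);
*/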
aliguori88715652009-02-11 15:20:58 +00003789void cpu_unregister_io_memory(int io_table_address)
3790{
3791 int i;
3792 int io_index = io_table_address >> IO_MEM_SHIFT;
3793
Alexander Grafdd310532010-12-08 12:05:36 +01003794 swapendian_del(io_index);
3795
aliguori88715652009-02-11 15:20:58 +00003796 for (i = 0; i < 3; i++) {
3797 io_mem_read[io_index][i] = unassigned_mem_read[i];
3798 io_mem_write[io_index][i] = unassigned_mem_write[i];
3799 }
3800 io_mem_opaque[io_index] = NULL;
3801 io_mem_used[io_index] = 0;
3802}
3803
Avi Kivitye9179ce2009-06-14 11:38:52 +03003804static void io_mem_init(void)
3805{
3806 int i;
3807
Alexander Graf2507c122010-12-08 12:05:37 +01003808 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3809 unassigned_mem_write, NULL,
3810 DEVICE_NATIVE_ENDIAN);
3811 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3812 unassigned_mem_write, NULL,
3813 DEVICE_NATIVE_ENDIAN);
3814 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3815 notdirty_mem_write, NULL,
3816 DEVICE_NATIVE_ENDIAN);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003817 for (i = 0; i < 5; i++) /* reserve the fixed IO_MEM_* slots */
3818 io_mem_used[i] = 1;
3819
3820 io_mem_watch = cpu_register_io_memory(watch_mem_read,
Alexander Graf2507c122010-12-08 12:05:37 +01003821 watch_mem_write, NULL,
3822 DEVICE_NATIVE_ENDIAN);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003823}
3824
Avi Kivity62152b82011-07-26 14:26:14 +03003825static void memory_map_init(void)
3826{
Anthony Liguori7267c092011-08-20 22:09:37 -05003827 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003828 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003829 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003830
Anthony Liguori7267c092011-08-20 22:09:37 -05003831 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003832 memory_region_init(system_io, "io", 65536);
3833 set_system_io_map(system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003834}
3835
3836MemoryRegion *get_system_memory(void)
3837{
3838 return system_memory;
3839}
3840
Avi Kivity309cb472011-08-08 16:09:03 +03003841MemoryRegion *get_system_io(void)
3842{
3843 return system_io;
3844}
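
/* Sketch (not from the original file): how a board model builds on the two
   accessors above.  This assumes the contemporary memory API, i.e. that
   memory_region_add_subregion() takes (parent, offset, child); the
   "board_*" names and addresses are hypothetical. */
static MemoryRegion board_mmio;

static void board_map_mmio(void)
{
    memory_region_init(&board_mmio, "board-mmio", 0x10000);
    memory_region_add_subregion(get_system_memory(), 0xfe000000, &board_mmio);
}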
3845
pbrooke2eef172008-06-08 01:09:01 +00003846#endif /* !defined(CONFIG_USER_ONLY) */
3847
bellard13eb76e2004-01-24 15:23:36 +00003848/* physical memory access (slow version, mainly for debug) */
3849#if defined(CONFIG_USER_ONLY)
Paul Brooka68fe892010-03-01 00:08:59 +00003850int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3851 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003852{
3853 int l, flags;
3854 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003855 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003856
3857 while (len > 0) {
3858 page = addr & TARGET_PAGE_MASK;
3859 l = (page + TARGET_PAGE_SIZE) - addr;
3860 if (l > len)
3861 l = len;
3862 flags = page_get_flags(page);
3863 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003864 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003865 if (is_write) {
3866 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003867 return -1;
bellard579a97f2007-11-11 14:26:47 +00003868 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003869 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003870 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003871 memcpy(p, buf, l);
3872 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003873 } else {
3874 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003875 return -1;
bellard579a97f2007-11-11 14:26:47 +00003876 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003877 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003878 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003879 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003880 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003881 }
3882 len -= l;
3883 buf += l;
3884 addr += l;
3885 }
Paul Brooka68fe892010-03-01 00:08:59 +00003886 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003887}
bellard8df1cd02005-01-28 22:37:22 +00003888
bellard13eb76e2004-01-24 15:23:36 +00003889#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003890void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003891 int len, int is_write)
3892{
3893 int l, io_index;
3894 uint8_t *ptr;
3895 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003896 target_phys_addr_t page;
Anthony PERARD8ca56922011-07-15 04:32:53 +00003897 ram_addr_t pd;
bellard92e873b2004-05-21 14:52:29 +00003898 PhysPageDesc *p;
ths3b46e622007-09-17 08:09:54 +00003899
bellard13eb76e2004-01-24 15:23:36 +00003900 while (len > 0) {
3901 page = addr & TARGET_PAGE_MASK;
3902 l = (page + TARGET_PAGE_SIZE) - addr;
3903 if (l > len)
3904 l = len;
bellard92e873b2004-05-21 14:52:29 +00003905 p = phys_page_find(page >> TARGET_PAGE_BITS);
bellard13eb76e2004-01-24 15:23:36 +00003906 if (!p) {
3907 pd = IO_MEM_UNASSIGNED;
3908 } else {
3909 pd = p->phys_offset;
3910 }
ths3b46e622007-09-17 08:09:54 +00003911
bellard13eb76e2004-01-24 15:23:36 +00003912 if (is_write) {
bellard3a7d9292005-08-21 09:26:42 +00003913 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
Anthony Liguoric227f092009-10-01 16:12:16 -05003914 target_phys_addr_t addr1 = addr;
bellard13eb76e2004-01-24 15:23:36 +00003915 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003916 if (p)
aurel326c2934d2009-02-18 21:37:17 +00003917 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard6a00d602005-11-21 23:25:50 +00003918 /* XXX: could force cpu_single_env to NULL to avoid
3919 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003920 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003921 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003922 val = ldl_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003923 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003924 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003925 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003926 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003927 val = lduw_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003928 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003929 l = 2;
3930 } else {
bellard1c213d12005-09-03 10:49:04 +00003931 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003932 val = ldub_p(buf);
aurel326c2934d2009-02-18 21:37:17 +00003933 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
bellard13eb76e2004-01-24 15:23:36 +00003934 l = 1;
3935 }
3936 } else {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003937 ram_addr_t addr1;
bellardb448f2f2004-02-25 23:24:04 +00003938 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
bellard13eb76e2004-01-24 15:23:36 +00003939 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003940 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003941 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003942 if (!cpu_physical_memory_is_dirty(addr1)) {
3943 /* invalidate code */
3944 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3945 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003946 cpu_physical_memory_set_dirty_flags(
3947 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003948 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003949 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003950 }
3951 } else {
ths5fafdf22007-09-16 21:08:06 +00003952 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00003953 !(pd & IO_MEM_ROMD)) {
Anthony Liguoric227f092009-10-01 16:12:16 -05003954 target_phys_addr_t addr1 = addr;
bellard13eb76e2004-01-24 15:23:36 +00003955 /* I/O case */
3956 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00003957 if (p)
aurel326c2934d2009-02-18 21:37:17 +00003958 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3959 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003960 /* 32 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003961 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003962 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003963 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003964 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003965 /* 16 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003966 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003967 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003968 l = 2;
3969 } else {
bellard1c213d12005-09-03 10:49:04 +00003970 /* 8 bit read access */
aurel326c2934d2009-02-18 21:37:17 +00003971 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
bellardc27004e2005-01-03 23:35:10 +00003972 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003973 l = 1;
3974 }
3975 } else {
3976 /* RAM case */
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003977 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3978 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3979 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003980 }
3981 }
3982 len -= l;
3983 buf += l;
3984 addr += l;
3985 }
3986}
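
/* Example sketch: device code usually goes through the
   cpu_physical_memory_read()/write() inline wrappers around the function
   above rather than calling it directly.  A typical pattern, with a
   hypothetical descriptor layout: */
static uint32_t fetch_desc_word(target_phys_addr_t desc_base)
{
    uint8_t raw[4];

    cpu_physical_memory_read(desc_base, raw, sizeof(raw));
    return ldl_p(raw);                  /* interpret in target byte order */
}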
bellard8df1cd02005-01-28 22:37:22 +00003987
bellardd0ecd2a2006-04-23 17:14:48 +00003988/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003989void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003990 const uint8_t *buf, int len)
3991{
3992 int l;
3993 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003994 target_phys_addr_t page;
bellardd0ecd2a2006-04-23 17:14:48 +00003995 unsigned long pd;
3996 PhysPageDesc *p;
ths3b46e622007-09-17 08:09:54 +00003997
bellardd0ecd2a2006-04-23 17:14:48 +00003998 while (len > 0) {
3999 page = addr & TARGET_PAGE_MASK;
4000 l = (page + TARGET_PAGE_SIZE) - addr;
4001 if (l > len)
4002 l = len;
4003 p = phys_page_find(page >> TARGET_PAGE_BITS);
4004 if (!p) {
4005 pd = IO_MEM_UNASSIGNED;
4006 } else {
4007 pd = p->phys_offset;
4008 }
ths3b46e622007-09-17 08:09:54 +00004009
bellardd0ecd2a2006-04-23 17:14:48 +00004010 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
bellard2a4188a2006-06-25 21:54:59 +00004011 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
4012 !(pd & IO_MEM_ROMD)) {
bellardd0ecd2a2006-04-23 17:14:48 +00004013 /* do nothing */
4014 } else {
4015 unsigned long addr1;
4016 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4017 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004018 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00004019 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01004020 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00004021 }
4022 len -= l;
4023 buf += l;
4024 addr += l;
4025 }
4026}
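
/* Example sketch: firmware loaders rely on this entry point because the
   ordinary cpu_physical_memory_rw() path above routes guest writes to ROM
   into the unassigned handler.  "base"/"blob"/"size" are hypothetical. */
static void load_firmware_blob(target_phys_addr_t base,
                               const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(base, blob, size);
}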
4027
aliguori6d16c2f2009-01-22 16:59:11 +00004028typedef struct {
4029 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05004030 target_phys_addr_t addr;
4031 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00004032} BounceBuffer;
4033
4034static BounceBuffer bounce;
4035
aliguoriba223c22009-01-22 16:59:16 +00004036typedef struct MapClient {
4037 void *opaque;
4038 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00004039 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00004040} MapClient;
4041
Blue Swirl72cf2d42009-09-12 07:36:22 +00004042static QLIST_HEAD(map_client_list, MapClient) map_client_list
4043 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00004044
4045void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
4046{
Anthony Liguori7267c092011-08-20 22:09:37 -05004047 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00004048
4049 client->opaque = opaque;
4050 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00004051 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00004052 return client;
4053}
4054
4055void cpu_unregister_map_client(void *_client)
4056{
4057 MapClient *client = (MapClient *)_client;
4058
Blue Swirl72cf2d42009-09-12 07:36:22 +00004059 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05004060 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00004061}
4062
4063static void cpu_notify_map_clients(void)
4064{
4065 MapClient *client;
4066
Blue Swirl72cf2d42009-09-12 07:36:22 +00004067 while (!QLIST_EMPTY(&map_client_list)) {
4068 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00004069 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09004070 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00004071 }
4072}
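
/* Sketch (hypothetical "MyDMAState"/"dma_restart" names): a DMA engine whose
   cpu_physical_memory_map() attempt failed can park itself on the client
   list and retry from the callback; note cpu_notify_map_clients()
   unregisters and frees the entry right after the callback returns. */
typedef struct MyDMAState {
    void *map_client;                   /* cookie from cpu_register_map_client() */
} MyDMAState;

static void dma_restart(void *opaque)
{
    MyDMAState *s = opaque;

    s->map_client = NULL;               /* entry is freed by the caller */
    /* ...re-issue the failed cpu_physical_memory_map() here... */
}

/* on mapping failure:  s->map_client = cpu_register_map_client(s, dma_restart); */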
4073
aliguori6d16c2f2009-01-22 16:59:11 +00004074/* Map a physical memory region into a host virtual address.
4075 * May map a subset of the requested range, given by and returned in *plen.
4076 * May return NULL if resources needed to perform the mapping are exhausted.
4077 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00004078 * Use cpu_register_map_client() to know when retrying the map operation is
4079 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00004080 */
Anthony Liguoric227f092009-10-01 16:12:16 -05004081void *cpu_physical_memory_map(target_phys_addr_t addr,
4082 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00004083 int is_write)
4084{
Anthony Liguoric227f092009-10-01 16:12:16 -05004085 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004086 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00004087 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004088 target_phys_addr_t page;
aliguori6d16c2f2009-01-22 16:59:11 +00004089 unsigned long pd;
4090 PhysPageDesc *p;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00004091 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01004092 ram_addr_t rlen;
4093 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00004094
4095 while (len > 0) {
4096 page = addr & TARGET_PAGE_MASK;
4097 l = (page + TARGET_PAGE_SIZE) - addr;
4098 if (l > len)
4099 l = len;
4100 p = phys_page_find(page >> TARGET_PAGE_BITS);
4101 if (!p) {
4102 pd = IO_MEM_UNASSIGNED;
4103 } else {
4104 pd = p->phys_offset;
4105 }
4106
4107 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004108 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00004109 break;
4110 }
4111 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
4112 bounce.addr = addr;
4113 bounce.len = l;
4114 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02004115 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00004116 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004117
4118 *plen = l;
4119 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00004120 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01004121 if (!todo) {
4122 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4123 }
aliguori6d16c2f2009-01-22 16:59:11 +00004124
4125 len -= l;
4126 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01004127 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00004128 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01004129 rlen = todo;
4130 ret = qemu_ram_ptr_length(raddr, &rlen);
4131 *plen = rlen;
4132 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00004133}
4134
4135/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4136 * Will also mark the memory as dirty if is_write == 1. access_len gives
4137 * the amount of memory that was actually read or written by the caller.
4138 */
Anthony Liguoric227f092009-10-01 16:12:16 -05004139void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4140 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00004141{
4142 if (buffer != bounce.buffer) {
4143 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03004144 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00004145 while (access_len) {
4146 unsigned l;
4147 l = TARGET_PAGE_SIZE;
4148 if (l > access_len)
4149 l = access_len;
4150 if (!cpu_physical_memory_is_dirty(addr1)) {
4151 /* invalidate code */
4152 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4153 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004154 cpu_physical_memory_set_dirty_flags(
4155 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori6d16c2f2009-01-22 16:59:11 +00004156 }
4157 addr1 += l;
4158 access_len -= l;
4159 }
4160 }
Jan Kiszka868bb332011-06-21 22:59:09 +02004161 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02004162 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01004163 }
aliguori6d16c2f2009-01-22 16:59:11 +00004164 return;
4165 }
4166 if (is_write) {
4167 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4168 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00004169 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00004170 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00004171 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00004172}
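
/* Sketch of the intended calling pattern for the pair above: map, use,
   unmap, retrying via the map-client list when NULL comes back (see the
   dma_restart() sketch earlier).  Hypothetical helper. */
static void dma_write_to_guest(target_phys_addr_t addr, const uint8_t *src,
                               target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        return;                         /* bounce buffer busy: register a client */
    }
    memcpy(host, src, plen);            /* plen may have been shrunk */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}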
bellardd0ecd2a2006-04-23 17:14:48 +00004173
bellard8df1cd02005-01-28 22:37:22 +00004174/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004175static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4176 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004177{
4178 int io_index;
4179 uint8_t *ptr;
4180 uint32_t val;
4181 unsigned long pd;
4182 PhysPageDesc *p;
4183
4184 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4185 if (!p) {
4186 pd = IO_MEM_UNASSIGNED;
4187 } else {
4188 pd = p->phys_offset;
4189 }
ths3b46e622007-09-17 08:09:54 +00004190
ths5fafdf22007-09-16 21:08:06 +00004191 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00004192 !(pd & IO_MEM_ROMD)) {
bellard8df1cd02005-01-28 22:37:22 +00004193 /* I/O case */
4194 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00004195 if (p)
4196 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00004197 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004198#if defined(TARGET_WORDS_BIGENDIAN)
4199 if (endian == DEVICE_LITTLE_ENDIAN) {
4200 val = bswap32(val);
4201 }
4202#else
4203 if (endian == DEVICE_BIG_ENDIAN) {
4204 val = bswap32(val);
4205 }
4206#endif
bellard8df1cd02005-01-28 22:37:22 +00004207 } else {
4208 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004209 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard8df1cd02005-01-28 22:37:22 +00004210 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004211 switch (endian) {
4212 case DEVICE_LITTLE_ENDIAN:
4213 val = ldl_le_p(ptr);
4214 break;
4215 case DEVICE_BIG_ENDIAN:
4216 val = ldl_be_p(ptr);
4217 break;
4218 default:
4219 val = ldl_p(ptr);
4220 break;
4221 }
bellard8df1cd02005-01-28 22:37:22 +00004222 }
4223 return val;
4224}
4225
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004226uint32_t ldl_phys(target_phys_addr_t addr)
4227{
4228 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4229}
4230
4231uint32_t ldl_le_phys(target_phys_addr_t addr)
4232{
4233 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4234}
4235
4236uint32_t ldl_be_phys(target_phys_addr_t addr)
4237{
4238 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4239}
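
/* Example: a device model reading from a little-endian descriptor ring
   would use the _le variant above so the result is correct whichever way
   TARGET_WORDS_BIGENDIAN is set.  Offset and name are hypothetical. */
static uint32_t read_ring_head(target_phys_addr_t ring_base)
{
    return ldl_le_phys(ring_base + 0x10);
}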
4240
bellard84b7b8e2005-11-28 21:19:04 +00004241/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004242static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4243 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00004244{
4245 int io_index;
4246 uint8_t *ptr;
4247 uint64_t val;
4248 unsigned long pd;
4249 PhysPageDesc *p;
4250
4251 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4252 if (!p) {
4253 pd = IO_MEM_UNASSIGNED;
4254 } else {
4255 pd = p->phys_offset;
4256 }
ths3b46e622007-09-17 08:09:54 +00004257
bellard2a4188a2006-06-25 21:54:59 +00004258 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4259 !(pd & IO_MEM_ROMD)) {
bellard84b7b8e2005-11-28 21:19:04 +00004260 /* I/O case */
4261 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00004262 if (p)
4263 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004264
4265 /* XXX This is broken when device endian != cpu endian.
4266 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00004267#ifdef TARGET_WORDS_BIGENDIAN
4268 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4269 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4270#else
4271 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4272 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4273#endif
4274 } else {
4275 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004276 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard84b7b8e2005-11-28 21:19:04 +00004277 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004278 switch (endian) {
4279 case DEVICE_LITTLE_ENDIAN:
4280 val = ldq_le_p(ptr);
4281 break;
4282 case DEVICE_BIG_ENDIAN:
4283 val = ldq_be_p(ptr);
4284 break;
4285 default:
4286 val = ldq_p(ptr);
4287 break;
4288 }
bellard84b7b8e2005-11-28 21:19:04 +00004289 }
4290 return val;
4291}
4292
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004293uint64_t ldq_phys(target_phys_addr_t addr)
4294{
4295 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4296}
4297
4298uint64_t ldq_le_phys(target_phys_addr_t addr)
4299{
4300 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4301}
4302
4303uint64_t ldq_be_phys(target_phys_addr_t addr)
4304{
4305 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4306}
4307
bellardaab33092005-10-30 20:48:42 +00004308/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004309uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00004310{
4311 uint8_t val;
4312 cpu_physical_memory_read(addr, &val, 1);
4313 return val;
4314}
4315
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004316/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004317static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4318 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004319{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004320 int io_index;
4321 uint8_t *ptr;
4322 uint64_t val;
4323 unsigned long pd;
4324 PhysPageDesc *p;
4325
4326 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4327 if (!p) {
4328 pd = IO_MEM_UNASSIGNED;
4329 } else {
4330 pd = p->phys_offset;
4331 }
4332
4333 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4334 !(pd & IO_MEM_ROMD)) {
4335 /* I/O case */
4336 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4337 if (p)
4338 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4339 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004340#if defined(TARGET_WORDS_BIGENDIAN)
4341 if (endian == DEVICE_LITTLE_ENDIAN) {
4342 val = bswap16(val);
4343 }
4344#else
4345 if (endian == DEVICE_BIG_ENDIAN) {
4346 val = bswap16(val);
4347 }
4348#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004349 } else {
4350 /* RAM case */
4351 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4352 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004353 switch (endian) {
4354 case DEVICE_LITTLE_ENDIAN:
4355 val = lduw_le_p(ptr);
4356 break;
4357 case DEVICE_BIG_ENDIAN:
4358 val = lduw_be_p(ptr);
4359 break;
4360 default:
4361 val = lduw_p(ptr);
4362 break;
4363 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004364 }
4365 return val;
bellardaab33092005-10-30 20:48:42 +00004366}
4367
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004368uint32_t lduw_phys(target_phys_addr_t addr)
4369{
4370 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4371}
4372
4373uint32_t lduw_le_phys(target_phys_addr_t addr)
4374{
4375 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4376}
4377
4378uint32_t lduw_be_phys(target_phys_addr_t addr)
4379{
4380 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4381}
4382
bellard8df1cd02005-01-28 22:37:22 +00004383/* warning: addr must be aligned. The ram page is not marked as dirty
4384 and the code inside is not invalidated. It is useful if the dirty
4385 bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05004386void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00004387{
4388 int io_index;
4389 uint8_t *ptr;
4390 unsigned long pd;
4391 PhysPageDesc *p;
4392
4393 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4394 if (!p) {
4395 pd = IO_MEM_UNASSIGNED;
4396 } else {
4397 pd = p->phys_offset;
4398 }
ths3b46e622007-09-17 08:09:54 +00004399
bellard3a7d9292005-08-21 09:26:42 +00004400 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
bellard8df1cd02005-01-28 22:37:22 +00004401 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00004402 if (p)
4403 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00004404 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4405 } else {
aliguori74576192008-10-06 14:02:03 +00004406 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
pbrook5579c7f2009-04-11 14:47:08 +00004407 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00004408 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00004409
4410 if (unlikely(in_migration)) {
4411 if (!cpu_physical_memory_is_dirty(addr1)) {
4412 /* invalidate code */
4413 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4414 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004415 cpu_physical_memory_set_dirty_flags(
4416 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00004417 }
4418 }
bellard8df1cd02005-01-28 22:37:22 +00004419 }
4420}
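
/* Sketch: the notdirty store above is what a software page-table walker
   would use to set accessed/dirty bits in a guest PTE, where going through
   stl_phys() would pointlessly flag the page for TB invalidation.  The bit
   value here is hypothetical. */
static void pte_set_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical ACCESSED bit */);
}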
4421
Anthony Liguoric227f092009-10-01 16:12:16 -05004422void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00004423{
4424 int io_index;
4425 uint8_t *ptr;
4426 unsigned long pd;
4427 PhysPageDesc *p;
4428
4429 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4430 if (!p) {
4431 pd = IO_MEM_UNASSIGNED;
4432 } else {
4433 pd = p->phys_offset;
4434 }
ths3b46e622007-09-17 08:09:54 +00004435
j_mayerbc98a7e2007-04-04 07:55:12 +00004436 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4437 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00004438 if (p)
4439 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
j_mayerbc98a7e2007-04-04 07:55:12 +00004440#ifdef TARGET_WORDS_BIGENDIAN
4441 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4442 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4443#else
4444 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4445 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4446#endif
4447 } else {
pbrook5579c7f2009-04-11 14:47:08 +00004448 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
j_mayerbc98a7e2007-04-04 07:55:12 +00004449 (addr & ~TARGET_PAGE_MASK);
4450 stq_p(ptr, val);
4451 }
4452}
4453
bellard8df1cd02005-01-28 22:37:22 +00004454/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004455static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4456 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004457{
4458 int io_index;
4459 uint8_t *ptr;
4460 unsigned long pd;
4461 PhysPageDesc *p;
4462
4463 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4464 if (!p) {
4465 pd = IO_MEM_UNASSIGNED;
4466 } else {
4467 pd = p->phys_offset;
4468 }
ths3b46e622007-09-17 08:09:54 +00004469
bellard3a7d9292005-08-21 09:26:42 +00004470 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
bellard8df1cd02005-01-28 22:37:22 +00004471 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00004472 if (p)
4473 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004474#if defined(TARGET_WORDS_BIGENDIAN)
4475 if (endian == DEVICE_LITTLE_ENDIAN) {
4476 val = bswap32(val);
4477 }
4478#else
4479 if (endian == DEVICE_BIG_ENDIAN) {
4480 val = bswap32(val);
4481 }
4482#endif
bellard8df1cd02005-01-28 22:37:22 +00004483 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4484 } else {
4485 unsigned long addr1;
4486 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4487 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004488 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004489 switch (endian) {
4490 case DEVICE_LITTLE_ENDIAN:
4491 stl_le_p(ptr, val);
4492 break;
4493 case DEVICE_BIG_ENDIAN:
4494 stl_be_p(ptr, val);
4495 break;
4496 default:
4497 stl_p(ptr, val);
4498 break;
4499 }
bellard3a7d9292005-08-21 09:26:42 +00004500 if (!cpu_physical_memory_is_dirty(addr1)) {
4501 /* invalidate code */
4502 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4503 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09004504 cpu_physical_memory_set_dirty_flags(addr1,
4505 (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00004506 }
bellard8df1cd02005-01-28 22:37:22 +00004507 }
4508}
4509
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004510void stl_phys(target_phys_addr_t addr, uint32_t val)
4511{
4512 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4513}
4514
4515void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4516{
4517 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4518}
4519
4520void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4521{
4522 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4523}
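
/* Example, mirroring the load-side sketch earlier: stores come in the same
   three flavours, so updating a little-endian ring tail is target-endian
   safe with the _le variant.  Offset and name are hypothetical. */
static void write_ring_tail(target_phys_addr_t ring_base, uint32_t tail)
{
    stl_le_phys(ring_base + 0x14, tail);
}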
4524
bellardaab33092005-10-30 20:48:42 +00004525/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004526void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004527{
4528 uint8_t v = val;
4529 cpu_physical_memory_write(addr, &v, 1);
4530}
4531
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004532/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004533static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4534 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004535{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004536 int io_index;
4537 uint8_t *ptr;
4538 unsigned long pd;
4539 PhysPageDesc *p;
4540
4541 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4542 if (!p) {
4543 pd = IO_MEM_UNASSIGNED;
4544 } else {
4545 pd = p->phys_offset;
4546 }
4547
4548 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4549 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4550 if (p)
4551 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004552#if defined(TARGET_WORDS_BIGENDIAN)
4553 if (endian == DEVICE_LITTLE_ENDIAN) {
4554 val = bswap16(val);
4555 }
4556#else
4557 if (endian == DEVICE_BIG_ENDIAN) {
4558 val = bswap16(val);
4559 }
4560#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004561 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4562 } else {
4563 unsigned long addr1;
4564 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4565 /* RAM case */
4566 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004567 switch (endian) {
4568 case DEVICE_LITTLE_ENDIAN:
4569 stw_le_p(ptr, val);
4570 break;
4571 case DEVICE_BIG_ENDIAN:
4572 stw_be_p(ptr, val);
4573 break;
4574 default:
4575 stw_p(ptr, val);
4576 break;
4577 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004578 if (!cpu_physical_memory_is_dirty(addr1)) {
4579 /* invalidate code */
4580 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4581 /* set dirty bit */
4582 cpu_physical_memory_set_dirty_flags(addr1,
4583 (0xff & ~CODE_DIRTY_FLAG));
4584 }
4585 }
bellardaab33092005-10-30 20:48:42 +00004586}
4587
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004588void stw_phys(target_phys_addr_t addr, uint32_t val)
4589{
4590 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4591}
4592
4593void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4594{
4595 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4596}
4597
4598void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4599{
4600 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4601}
4602
bellardaab33092005-10-30 20:48:42 +00004603/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004604void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004605{
4606 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004607 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004608}
4609
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004610void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4611{
4612 val = cpu_to_le64(val);
4613 cpu_physical_memory_write(addr, &val, 8);
4614}
4615
4616void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4617{
4618 val = cpu_to_be64(val);
4619 cpu_physical_memory_write(addr, &val, 8);
4620}
4621
aliguori5e2972f2009-03-28 17:51:36 +00004622/* virtual memory access for debug (includes writing to ROM) */
ths5fafdf22007-09-16 21:08:06 +00004623int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004624 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004625{
4626 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004627 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004628 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004629
4630 while (len > 0) {
4631 page = addr & TARGET_PAGE_MASK;
4632 phys_addr = cpu_get_phys_page_debug(env, page);
4633 /* if no physical page mapped, return an error */
4634 if (phys_addr == -1)
4635 return -1;
4636 l = (page + TARGET_PAGE_SIZE) - addr;
4637 if (l > len)
4638 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004639 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004640 if (is_write)
4641 cpu_physical_memory_write_rom(phys_addr, buf, l);
4642 else
aliguori5e2972f2009-03-28 17:51:36 +00004643 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004644 len -= l;
4645 buf += l;
4646 addr += l;
4647 }
4648 return 0;
4649}
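
/* Sketch: how a debugger front end (e.g. the gdb stub) might wrap the
   function above to peek at a guest virtual address; this simplified
   helper just returns 0 for unmapped pages. */
static uint32_t debug_peek_u32(CPUState *env, target_ulong vaddr)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return 0;                       /* no physical page mapped */
    }
    return ldl_p(buf);                  /* target byte order */
}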
Paul Brooka68fe892010-03-01 00:08:59 +00004650#endif
bellard13eb76e2004-01-24 15:23:36 +00004651
pbrook2e70f6e2008-06-29 01:03:05 +00004652/* in deterministic execution mode, instructions performing device I/O
4653 must be at the end of the TB */
4654void cpu_io_recompile(CPUState *env, void *retaddr)
4655{
4656 TranslationBlock *tb;
4657 uint32_t n, cflags;
4658 target_ulong pc, cs_base;
4659 uint64_t flags;
4660
4661 tb = tb_find_pc((unsigned long)retaddr);
4662 if (!tb) {
4663 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4664 retaddr);
4665 }
4666 n = env->icount_decr.u16.low + tb->icount;
Stefan Weil618ba8e2011-04-18 06:39:53 +00004667 cpu_restore_state(tb, env, (unsigned long)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004668 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004669 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004670 n = n - env->icount_decr.u16.low;
4671 /* Generate a new TB ending on the I/O insn. */
4672 n++;
4673 /* On MIPS and SH, delay slot instructions can only be restarted if
4674 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004675 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004676 branch. */
4677#if defined(TARGET_MIPS)
4678 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4679 env->active_tc.PC -= 4;
4680 env->icount_decr.u16.low++;
4681 env->hflags &= ~MIPS_HFLAG_BMASK;
4682 }
4683#elif defined(TARGET_SH4)
4684 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4685 && n > 1) {
4686 env->pc -= 2;
4687 env->icount_decr.u16.low++;
4688 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4689 }
4690#endif
4691 /* This should never happen. */
4692 if (n > CF_COUNT_MASK)
4693 cpu_abort(env, "TB too big during recompile");
4694
4695 cflags = n | CF_LAST_IO;
4696 pc = tb->pc;
4697 cs_base = tb->cs_base;
4698 flags = tb->flags;
4699 tb_phys_invalidate(tb, -1);
4700 /* FIXME: In theory this could raise an exception. In practice
4701 we have already translated the block once so it's probably ok. */
4702 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004703 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004704 the first in the TB) then we end up generating a whole new TB and
4705 repeating the fault, which is horribly inefficient.
4706 Better would be to execute just this insn uncached, or generate a
4707 second new TB. */
4708 cpu_resume_from_signal(env, NULL);
4709}
4710
Paul Brookb3755a92010-03-12 16:54:58 +00004711#if !defined(CONFIG_USER_ONLY)
4712
Stefan Weil055403b2010-10-22 23:03:32 +02004713void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004714{
4715 int i, target_code_size, max_target_code_size;
4716 int direct_jmp_count, direct_jmp2_count, cross_page;
4717 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004718
bellarde3db7222005-01-26 22:00:47 +00004719 target_code_size = 0;
4720 max_target_code_size = 0;
4721 cross_page = 0;
4722 direct_jmp_count = 0;
4723 direct_jmp2_count = 0;
4724 for(i = 0; i < nb_tbs; i++) {
4725 tb = &tbs[i];
4726 target_code_size += tb->size;
4727 if (tb->size > max_target_code_size)
4728 max_target_code_size = tb->size;
4729 if (tb->page_addr[1] != -1)
4730 cross_page++;
4731 if (tb->tb_next_offset[0] != 0xffff) {
4732 direct_jmp_count++;
4733 if (tb->tb_next_offset[1] != 0xffff) {
4734 direct_jmp2_count++;
4735 }
4736 }
4737 }
4738 /* XXX: avoid using doubles? */
bellard57fec1f2008-02-01 10:50:11 +00004739 cpu_fprintf(f, "Translation buffer state:\n");
Stefan Weil055403b2010-10-22 23:03:32 +02004740 cpu_fprintf(f, "gen code size %td/%ld\n",
bellard26a5f132008-05-28 12:30:31 +00004741 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4742 cpu_fprintf(f, "TB count %d/%d\n",
4743 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004744 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004745 nb_tbs ? target_code_size / nb_tbs : 0,
4746 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004747 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004748 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4749 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004750 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4751 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004752 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4753 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004754 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004755 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4756 direct_jmp2_count,
4757 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004758 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004759 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4760 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4761 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004762 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004763}
4764
bellard61382a52003-10-27 21:22:23 +00004765#define MMUSUFFIX _cmmu
Blue Swirl39171492011-09-21 18:13:16 +00004766#undef GETPC
bellard61382a52003-10-27 21:22:23 +00004767#define GETPC() NULL
4768#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00004769#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00004770
4771#define SHIFT 0
4772#include "softmmu_template.h"
4773
4774#define SHIFT 1
4775#include "softmmu_template.h"
4776
4777#define SHIFT 2
4778#include "softmmu_template.h"
4779
4780#define SHIFT 3
4781#include "softmmu_template.h"
4782
4783#undef env
4784
4785#endif