/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
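
/* PageDesc.first_tb is a tagged pointer: a TB may straddle two guest
   pages, so each page chains the TB through page_next[0] or page_next[1],
   and the low two bits of the stored pointer record which entry to follow
   next (hence the "(long)tb & 3" masks used throughout this file). */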

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
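
/* Worked example: with TARGET_PAGE_BITS = 12 and L1_MAP_ADDR_SPACE_BITS = 42
   (values vary by target; these are illustrative), the page index needs
   42 - 12 = 30 bits.  V_L1_BITS_REM = 30 % 10 = 0, so V_L1_BITS = 10 and
   V_L1_SHIFT = 20: a 1024-entry L1 table over two further 10-bit levels.
   A remainder of 1..3 bits would instead be folded into a wider L1 rather
   than form a silly small top level. */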

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

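/* Mark a host address range readable/writable/executable so that generated
   code can be run from it: VirtualProtect() on Win32, mprotect() rounded
   out to host page boundaries elsewhere. */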
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

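        /* Each maps line has the form
               08048000-08056000 r-xp 00000000 03:0c 64593   /usr/bin/foo
           (addresses illustrative); the fscanf() below consumes only the
           hexadecimal start/end addresses and discards the rest of the
           line. */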
        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

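/* Walk (and optionally populate) the multi-level map for a guest page
   index: one step through l1_map, then V_L1_SHIFT / L2_BITS - 1
   intermediate tables of void *, ending in an L2_SIZE array of PageDesc.
   With alloc == 0 the walk returns NULL at the first missing level
   instead of allocating it. */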
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
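/* Same walk as page_find_alloc(), but over l1_phys_map and yielding a
   PhysPageDesc.  Freshly allocated bottom levels are initialized to
   IO_MEM_UNASSIGNED with region_offset set to each page's own address,
   so an unpopulated physical page reads as unassigned I/O, not RAM. */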
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

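/* Allocate the buffer that host code is generated into: either the static
   buffer above, or an mmap'ed region placed where the host ISA can reach
   it with direct calls and branches (e.g. below 2GB via MAP_32BIT on
   x86-64 Linux), or, failing that, plain heap memory made executable with
   map_exec(). */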
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

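/* Linear scan of the global CPU list; returns the CPUState whose
   cpu_index matches, or NULL if there is no such CPU. */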
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated. */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

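/* Drop a page's SMC code bitmap and reset its write counter, so a later
   run of writes must reach SMC_BITMAP_USE_THRESHOLD again before a new
   bitmap is built. */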
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

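/* Set bits [start, start + len) in the bitmap 'tab', LSB-first within
   each byte.  Example: start = 6, len = 4 sets the top two bits of
   tab[0] and the bottom two bits of tab[1]. */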
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

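/* Build a page's one-bit-per-byte "contains translated code" bitmap from
   the TBs currently linked to it; tb_invalidate_phys_page_fast() uses it
   to skip invalidation for writes that hit no translated code. */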
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

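/* Translate the guest code at pc/cs_base/flags into a fresh TB.  If the
   TB array or the code buffer is exhausted, everything is flushed and
   the allocation retried, so the second tb_alloc() cannot fail. */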
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

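/* Illustrative sketch (hypothetical helper, kept out of the build): the
   execution loop regenerates a missing block in roughly this way.  A
   cflags of 1 (CF_COUNT_MASK) instead requests a single-instruction TB,
   as used for precise self-modifying-code handling below. */
#if 0
static TranslationBlock *example_regenerate(CPUState *env)
{
    target_ulong pc, cs_base;
    int flags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    return tb_gen_code(env, pc, cs_base, flags, 0);
}
#endif
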
/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end). NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
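/* User-mode variant: invalidate every TB on the page containing 'addr'.
   'pc' and 'puc' come from the signal handler so that, with precise SMC
   support, execution can be restarted cleanly when the faulting store
   lies inside the very TB it modifies. */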
Paul Brook41c1b1c2010-03-12 16:54:58 +00001153static void tb_invalidate_phys_page(tb_page_addr_t addr,
bellardd720b932004-04-25 17:57:43 +00001154 unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001155{
aliguori6b917542008-11-18 19:46:41 +00001156 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001157 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001158 int n;
bellardd720b932004-04-25 17:57:43 +00001159#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001160 TranslationBlock *current_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001161 CPUState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001162 int current_tb_modified = 0;
1163 target_ulong current_pc = 0;
1164 target_ulong current_cs_base = 0;
1165 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001166#endif
bellard9fa3e852004-01-04 18:06:42 +00001167
1168 addr &= TARGET_PAGE_MASK;
1169 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001170 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001171 return;
1172 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001173#ifdef TARGET_HAS_PRECISE_SMC
1174 if (tb && pc != 0) {
1175 current_tb = tb_find_pc(pc);
1176 }
1177#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001178 while (tb != NULL) {
bellard9fa3e852004-01-04 18:06:42 +00001179 n = (long)tb & 3;
1180 tb = (TranslationBlock *)((long)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001181#ifdef TARGET_HAS_PRECISE_SMC
1182 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001183 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001184 /* If we are modifying the current TB, we must stop
1185 its execution. We could be more precise by checking
1186 that the modification is after the current PC, but it
1187 would require a specialized function to partially
1188 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001189
bellardd720b932004-04-25 17:57:43 +00001190 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001191 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001192 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1193 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001194 }
1195#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001196 tb_phys_invalidate(tb, addr);
1197 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001198 }
1199 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001200#ifdef TARGET_HAS_PRECISE_SMC
1201 if (current_tb_modified) {
1202 /* we generate a block containing just the instruction
1203 modifying the memory. It will ensure that it cannot modify
1204 itself */
bellardea1c1802004-06-14 18:56:36 +00001205 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001206 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001207 cpu_resume_from_signal(env, puc);
1208 }
1209#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001210}
bellard9fa3e852004-01-04 18:06:42 +00001211#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001212
1213/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001214static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001215 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001216{
1217 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001218#ifndef CONFIG_USER_ONLY
1219 bool page_already_protected;
1220#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001221
bellard9fa3e852004-01-04 18:06:42 +00001222 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001223 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001224 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001225#ifndef CONFIG_USER_ONLY
1226 page_already_protected = p->first_tb != NULL;
1227#endif
bellard9fa3e852004-01-04 18:06:42 +00001228 p->first_tb = (TranslationBlock *)((long)tb | n);
1229 invalidate_page_bitmap(p);
1230
bellard107db442004-06-22 18:48:46 +00001231#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001232
bellard9fa3e852004-01-04 18:06:42 +00001233#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001234 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001235 target_ulong addr;
1236 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001237 int prot;
1238
bellardfd6ce8f2003-05-14 19:00:11 +00001239 /* force the host page as non writable (writes will have a
1240 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001241 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001242 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001243 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1244 addr += TARGET_PAGE_SIZE) {
1245
1246 p2 = page_find (addr >> TARGET_PAGE_BITS);
1247 if (!p2)
1248 continue;
1249 prot |= p2->flags;
1250 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001251 }
ths5fafdf22007-09-16 21:08:06 +00001252 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001253 (prot & PAGE_BITS) & ~PAGE_WRITE);
1254#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001255 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001256 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001257#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001258 }
bellard9fa3e852004-01-04 18:06:42 +00001259#else
1260 /* if some code is already present, then the pages are already
1261 protected. So we handle the case where only the first TB is
1262 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001263 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001264 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001265 }
1266#endif
bellardd720b932004-04-25 17:57:43 +00001267
1268#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001269}
1270
bellard9fa3e852004-01-04 18:06:42 +00001271/* add a new TB and link it to the physical page tables. phys_page2 is
1272 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001273void tb_link_page(TranslationBlock *tb,
1274 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001275{
bellard9fa3e852004-01-04 18:06:42 +00001276 unsigned int h;
1277 TranslationBlock **ptb;
1278
pbrookc8a706f2008-06-02 16:16:42 +00001279 /* Grab the mmap lock to stop another thread invalidating this TB
1280 before we are done. */
1281 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001282 /* add in the physical hash table */
1283 h = tb_phys_hash_func(phys_pc);
1284 ptb = &tb_phys_hash[h];
1285 tb->phys_hash_next = *ptb;
1286 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001287
1288 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001289 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1290 if (phys_page2 != -1)
1291 tb_alloc_page(tb, 1, phys_page2);
1292 else
1293 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001294
bellardd4e81642003-05-25 16:46:15 +00001295 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1296 tb->jmp_next[0] = NULL;
1297 tb->jmp_next[1] = NULL;
1298
1299 /* init original jump addresses */
1300 if (tb->tb_next_offset[0] != 0xffff)
1301 tb_reset_jump(tb, 0);
1302 if (tb->tb_next_offset[1] != 0xffff)
1303 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001304
1305#ifdef DEBUG_TB_CHECK
1306 tb_page_check();
1307#endif
pbrookc8a706f2008-06-02 16:16:42 +00001308 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001309}
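
/* Call-site sketch (simplified from tb_gen_code): a TB whose generated
 * code crosses a guest page boundary must register both physical pages,
 * so the second page is derived from the TB's last byte:
 *
 *     phys_page2 = -1;
 *     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
 *     if ((pc & TARGET_PAGE_MASK) != virt_page2)
 *         phys_page2 = get_page_addr_code(env, virt_page2);
 *     tb_link_page(tb, phys_pc, phys_page2);
 */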
1310
bellarda513fe12003-05-27 23:29:48 +00001311/* find the TB 'tb' whose generated code contains tc_ptr, i.e. such that
1312   tb->tc_ptr <= tc_ptr < (tb + 1)->tc_ptr. Return NULL if not found */
1313TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1314{
1315 int m_min, m_max, m;
1316 unsigned long v;
1317 TranslationBlock *tb;
1318
1319 if (nb_tbs <= 0)
1320 return NULL;
1321 if (tc_ptr < (unsigned long)code_gen_buffer ||
1322 tc_ptr >= (unsigned long)code_gen_ptr)
1323 return NULL;
1324 /* binary search (cf Knuth) */
1325 m_min = 0;
1326 m_max = nb_tbs - 1;
1327 while (m_min <= m_max) {
1328 m = (m_min + m_max) >> 1;
1329 tb = &tbs[m];
1330 v = (unsigned long)tb->tc_ptr;
1331 if (v == tc_ptr)
1332 return tb;
1333 else if (tc_ptr < v) {
1334 m_max = m - 1;
1335 } else {
1336 m_min = m + 1;
1337 }
ths5fafdf22007-09-16 21:08:06 +00001338 }
bellarda513fe12003-05-27 23:29:48 +00001339 return &tbs[m_max];
1340}
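
/* Worked example: with three TBs whose tc_ptr values are 0x1000, 0x1800
 * and 0x2400, a lookup of tc_ptr = 0x1a00 ends with m_max == 1 and
 * returns &tbs[1], the TB whose generated code contains that host
 * address. This relies on tbs[] being sorted by tc_ptr, which holds
 * because code is emitted linearly into code_gen_buffer. */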
bellard75012672003-06-21 13:11:07 +00001341
bellardea041c02003-06-25 16:16:50 +00001342static void tb_reset_jump_recursive(TranslationBlock *tb);
1343
1344static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1345{
1346 TranslationBlock *tb1, *tb_next, **ptb;
1347 unsigned int n1;
1348
1349 tb1 = tb->jmp_next[n];
1350 if (tb1 != NULL) {
1351 /* find head of list */
1352 for(;;) {
1353 n1 = (long)tb1 & 3;
1354 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1355 if (n1 == 2)
1356 break;
1357 tb1 = tb1->jmp_next[n1];
1358 }
1359         /* we are now sure that tb jumps to tb1 */
1360 tb_next = tb1;
1361
1362 /* remove tb from the jmp_first list */
1363 ptb = &tb_next->jmp_first;
1364 for(;;) {
1365 tb1 = *ptb;
1366 n1 = (long)tb1 & 3;
1367 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1368 if (n1 == n && tb1 == tb)
1369 break;
1370 ptb = &tb1->jmp_next[n1];
1371 }
1372 *ptb = tb->jmp_next[n];
1373 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001374
bellardea041c02003-06-25 16:16:50 +00001375 /* suppress the jump to next tb in generated code */
1376 tb_reset_jump(tb, n);
1377
bellard01243112004-01-04 15:48:17 +00001378        /* suppress jumps in the tb we could have jumped to */
bellardea041c02003-06-25 16:16:50 +00001379 tb_reset_jump_recursive(tb_next);
1380 }
1381}
1382
1383static void tb_reset_jump_recursive(TranslationBlock *tb)
1384{
1385 tb_reset_jump_recursive2(tb, 0);
1386 tb_reset_jump_recursive2(tb, 1);
1387}
1388
bellard1fddef42005-04-17 19:16:13 +00001389#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001390#if defined(CONFIG_USER_ONLY)
1391static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1392{
1393 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1394}
1395#else
bellardd720b932004-04-25 17:57:43 +00001396static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1397{
Anthony Liguoric227f092009-10-01 16:12:16 -05001398 target_phys_addr_t addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00001399 target_ulong pd;
Anthony Liguoric227f092009-10-01 16:12:16 -05001400 ram_addr_t ram_addr;
pbrookc2f07f82006-04-08 17:14:56 +00001401 PhysPageDesc *p;
bellardd720b932004-04-25 17:57:43 +00001402
pbrookc2f07f82006-04-08 17:14:56 +00001403 addr = cpu_get_phys_page_debug(env, pc);
1404 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1405 if (!p) {
1406 pd = IO_MEM_UNASSIGNED;
1407 } else {
1408 pd = p->phys_offset;
1409 }
1410 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001411 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001412}
bellardc27004e2005-01-03 23:35:10 +00001413#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001414#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001415
Paul Brookc527ee82010-03-01 03:31:14 +00001416#if defined(CONFIG_USER_ONLY)
1417void cpu_watchpoint_remove_all(CPUState *env, int mask)
1419{
1420}
1421
1422int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1423 int flags, CPUWatchpoint **watchpoint)
1424{
1425 return -ENOSYS;
1426}
1427#else
pbrook6658ffb2007-03-16 23:58:11 +00001428/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001429int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1430 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001431{
aliguorib4051332008-11-18 20:14:20 +00001432 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001433 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001434
aliguorib4051332008-11-18 20:14:20 +00001435 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1436 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1437 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1438 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1439 return -EINVAL;
1440 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001441 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001442
aliguoria1d1bb32008-11-18 20:07:32 +00001443 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001444 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001445 wp->flags = flags;
1446
aliguori2dc9f412008-11-18 20:56:59 +00001447 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001448 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001449 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001450 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001451 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001452
pbrook6658ffb2007-03-16 23:58:11 +00001453 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001454
1455 if (watchpoint)
1456 *watchpoint = wp;
1457 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001458}
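
/* Usage sketch (values illustrative): a gdbstub-style caller installing
 * a 4-byte write watchpoint; lengths other than 1, 2, 4 or 8, and
 * unaligned addresses, are rejected with -EINVAL by the sanity check
 * above:
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 4,
 *                               BP_GDB | BP_MEM_WRITE, &wp) < 0)
 *         return -EINVAL;
 */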
1459
aliguoria1d1bb32008-11-18 20:07:32 +00001460/* Remove a specific watchpoint. */
1461int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1462 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001463{
aliguorib4051332008-11-18 20:14:20 +00001464 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001465 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001466
Blue Swirl72cf2d42009-09-12 07:36:22 +00001467 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001468 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001469 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001470 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001471 return 0;
1472 }
1473 }
aliguoria1d1bb32008-11-18 20:07:32 +00001474 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001475}
1476
aliguoria1d1bb32008-11-18 20:07:32 +00001477/* Remove a specific watchpoint by reference. */
1478void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1479{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001480 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001481
aliguoria1d1bb32008-11-18 20:07:32 +00001482 tlb_flush_page(env, watchpoint->vaddr);
1483
Anthony Liguori7267c092011-08-20 22:09:37 -05001484 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001485}
1486
aliguoria1d1bb32008-11-18 20:07:32 +00001487/* Remove all matching watchpoints. */
1488void cpu_watchpoint_remove_all(CPUState *env, int mask)
1489{
aliguoric0ce9982008-11-25 22:13:57 +00001490 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001491
Blue Swirl72cf2d42009-09-12 07:36:22 +00001492 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001493 if (wp->flags & mask)
1494 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001495 }
aliguoria1d1bb32008-11-18 20:07:32 +00001496}
Paul Brookc527ee82010-03-01 03:31:14 +00001497#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001498
1499/* Add a breakpoint. */
1500int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1501 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001502{
bellard1fddef42005-04-17 19:16:13 +00001503#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001504 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001505
Anthony Liguori7267c092011-08-20 22:09:37 -05001506 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001507
1508 bp->pc = pc;
1509 bp->flags = flags;
1510
aliguori2dc9f412008-11-18 20:56:59 +00001511 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001512 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001513 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001514 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001515 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001516
1517 breakpoint_invalidate(env, pc);
1518
1519 if (breakpoint)
1520 *breakpoint = bp;
1521 return 0;
1522#else
1523 return -ENOSYS;
1524#endif
1525}
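
/* Usage sketch: insert and remove must be paired with matching pc and
 * flags; BP_GDB keeps the breakpoint ahead of BP_CPU ones in the list:
 *
 *     cpu_breakpoint_insert(env, pc, BP_GDB, NULL);
 *     ...
 *     cpu_breakpoint_remove(env, pc, BP_GDB);
 */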
1526
1527/* Remove a specific breakpoint. */
1528int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1529{
1530#if defined(TARGET_HAS_ICE)
1531 CPUBreakpoint *bp;
1532
Blue Swirl72cf2d42009-09-12 07:36:22 +00001533 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001534 if (bp->pc == pc && bp->flags == flags) {
1535 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001536 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001537 }
bellard4c3a88a2003-07-26 12:06:08 +00001538 }
aliguoria1d1bb32008-11-18 20:07:32 +00001539 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001540#else
aliguoria1d1bb32008-11-18 20:07:32 +00001541 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001542#endif
1543}
1544
aliguoria1d1bb32008-11-18 20:07:32 +00001545/* Remove a specific breakpoint by reference. */
1546void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001547{
bellard1fddef42005-04-17 19:16:13 +00001548#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001549 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001550
aliguoria1d1bb32008-11-18 20:07:32 +00001551 breakpoint_invalidate(env, breakpoint->pc);
1552
Anthony Liguori7267c092011-08-20 22:09:37 -05001553 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001554#endif
1555}
1556
1557/* Remove all matching breakpoints. */
1558void cpu_breakpoint_remove_all(CPUState *env, int mask)
1559{
1560#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001561 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001562
Blue Swirl72cf2d42009-09-12 07:36:22 +00001563 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001564 if (bp->flags & mask)
1565 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001566 }
bellard4c3a88a2003-07-26 12:06:08 +00001567#endif
1568}
1569
bellardc33a3462003-07-29 20:50:33 +00001570/* enable or disable single step mode. EXCP_DEBUG is returned by the
1571 CPU loop after each instruction */
1572void cpu_single_step(CPUState *env, int enabled)
1573{
bellard1fddef42005-04-17 19:16:13 +00001574#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001575 if (env->singlestep_enabled != enabled) {
1576 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001577 if (kvm_enabled())
1578 kvm_update_guest_debug(env, 0);
1579 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001580 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001581 /* XXX: only flush what is necessary */
1582 tb_flush(env);
1583 }
bellardc33a3462003-07-29 20:50:33 +00001584 }
1585#endif
1586}
1587
bellard34865132003-10-05 14:28:56 +00001588/* enable or disable low-level logging */
1589void cpu_set_log(int log_flags)
1590{
1591 loglevel = log_flags;
1592 if (loglevel && !logfile) {
pbrook11fcfab2007-07-01 18:21:11 +00001593 logfile = fopen(logfilename, log_append ? "a" : "w");
bellard34865132003-10-05 14:28:56 +00001594 if (!logfile) {
1595 perror(logfilename);
1596 _exit(1);
1597 }
bellard9fa3e852004-01-04 18:06:42 +00001598#if !defined(CONFIG_SOFTMMU)
1599     /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1600 {
blueswir1b55266b2008-09-20 08:07:15 +00001601 static char logfile_buf[4096];
bellard9fa3e852004-01-04 18:06:42 +00001602 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1603 }
Stefan Weildaf767b2011-12-03 22:32:37 +01001604#elif defined(_WIN32)
1605 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1606 setvbuf(logfile, NULL, _IONBF, 0);
1607#else
bellard34865132003-10-05 14:28:56 +00001608 setvbuf(logfile, NULL, _IOLBF, 0);
bellard9fa3e852004-01-04 18:06:42 +00001609#endif
pbrooke735b912007-06-30 13:53:24 +00001610 log_append = 1;
1611 }
1612 if (!loglevel && logfile) {
1613 fclose(logfile);
1614 logfile = NULL;
bellard34865132003-10-05 14:28:56 +00001615 }
1616}
1617
1618void cpu_set_log_filename(const char *filename)
1619{
1620 logfilename = strdup(filename);
pbrooke735b912007-06-30 13:53:24 +00001621 if (logfile) {
1622 fclose(logfile);
1623 logfile = NULL;
1624 }
1625 cpu_set_log(loglevel);
bellard34865132003-10-05 14:28:56 +00001626}
bellardc33a3462003-07-29 20:50:33 +00001627
aurel323098dba2009-03-07 21:28:24 +00001628static void cpu_unlink_tb(CPUState *env)
bellardea041c02003-06-25 16:16:50 +00001629{
pbrookd5975362008-06-07 20:50:51 +00001630 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1631 problem and hope the cpu will stop of its own accord. For userspace
1632 emulation this often isn't actually as bad as it sounds. Often
1633 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001634 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001635 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001636
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001637 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001638 tb = env->current_tb;
1639 /* if the cpu is currently executing code, we must unlink it and
1640 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001641 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001642 env->current_tb = NULL;
1643 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001644 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001645 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001646}
1647
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001648#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001649/* mask must never be zero, except for the A20 change call */
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001650static void tcg_handle_interrupt(CPUState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001651{
1652 int old_mask;
1653
1654 old_mask = env->interrupt_request;
1655 env->interrupt_request |= mask;
1656
aliguori8edac962009-04-24 18:03:45 +00001657 /*
1658 * If called from iothread context, wake the target cpu in
1659      * case it's halted.
1660 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001661 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001662 qemu_cpu_kick(env);
1663 return;
1664 }
aliguori8edac962009-04-24 18:03:45 +00001665
pbrook2e70f6e2008-06-29 01:03:05 +00001666 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001667 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001668 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001669 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001670 cpu_abort(env, "Raised interrupt while not in I/O function");
1671 }
pbrook2e70f6e2008-06-29 01:03:05 +00001672 } else {
aurel323098dba2009-03-07 21:28:24 +00001673 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001674 }
1675}
1676
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001677CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1678
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001679#else /* CONFIG_USER_ONLY */
1680
1681void cpu_interrupt(CPUState *env, int mask)
1682{
1683 env->interrupt_request |= mask;
1684 cpu_unlink_tb(env);
1685}
1686#endif /* CONFIG_USER_ONLY */
1687
bellardb54ad042004-05-20 13:42:52 +00001688void cpu_reset_interrupt(CPUState *env, int mask)
1689{
1690 env->interrupt_request &= ~mask;
1691}
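
/* Usage sketch: an interrupt controller model asserts and deasserts a
 * CPU's interrupt line with this pair, e.g.
 *
 *     cpu_interrupt(env, CPU_INTERRUPT_HARD);        // line raised
 *     ...
 *     cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);  // line lowered
 *
 * CPU_INTERRUPT_HARD is the generic external interrupt bit; targets
 * define additional CPU_INTERRUPT_* masks. */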
1692
aurel323098dba2009-03-07 21:28:24 +00001693void cpu_exit(CPUState *env)
1694{
1695 env->exit_request = 1;
1696 cpu_unlink_tb(env);
1697}
1698
blueswir1c7cd6a32008-10-02 18:27:46 +00001699const CPULogItem cpu_log_items[] = {
ths5fafdf22007-09-16 21:08:06 +00001700 { CPU_LOG_TB_OUT_ASM, "out_asm",
bellardf193c792004-03-21 17:06:25 +00001701 "show generated host assembly code for each compiled TB" },
1702 { CPU_LOG_TB_IN_ASM, "in_asm",
1703 "show target assembly code for each compiled TB" },
ths5fafdf22007-09-16 21:08:06 +00001704 { CPU_LOG_TB_OP, "op",
bellard57fec1f2008-02-01 10:50:11 +00001705 "show micro ops for each compiled TB" },
bellardf193c792004-03-21 17:06:25 +00001706 { CPU_LOG_TB_OP_OPT, "op_opt",
blueswir1e01a1152008-03-14 17:37:11 +00001707 "show micro ops "
1708#ifdef TARGET_I386
1709 "before eflags optimization and "
bellardf193c792004-03-21 17:06:25 +00001710#endif
blueswir1e01a1152008-03-14 17:37:11 +00001711 "after liveness analysis" },
bellardf193c792004-03-21 17:06:25 +00001712 { CPU_LOG_INT, "int",
1713 "show interrupts/exceptions in short format" },
1714 { CPU_LOG_EXEC, "exec",
1715 "show trace before each executed TB (lots of logs)" },
bellard9fddaa02004-05-21 12:59:32 +00001716 { CPU_LOG_TB_CPU, "cpu",
thse91c8a72007-06-03 13:35:16 +00001717 "show CPU state before block translation" },
bellardf193c792004-03-21 17:06:25 +00001718#ifdef TARGET_I386
1719 { CPU_LOG_PCALL, "pcall",
1720 "show protected mode far calls/returns/exceptions" },
aliguorieca1bdf2009-01-26 19:54:31 +00001721 { CPU_LOG_RESET, "cpu_reset",
1722 "show CPU state before CPU resets" },
bellardf193c792004-03-21 17:06:25 +00001723#endif
bellard8e3a9fd2004-10-09 17:32:58 +00001724#ifdef DEBUG_IOPORT
bellardfd872592004-05-12 19:11:15 +00001725 { CPU_LOG_IOPORT, "ioport",
1726 "show all i/o ports accesses" },
bellard8e3a9fd2004-10-09 17:32:58 +00001727#endif
bellardf193c792004-03-21 17:06:25 +00001728 { 0, NULL, NULL },
1729};
1730
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001731#ifndef CONFIG_USER_ONLY
1732static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1733 = QLIST_HEAD_INITIALIZER(memory_client_list);
1734
1735static void cpu_notify_set_memory(target_phys_addr_t start_addr,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001736 ram_addr_t size,
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03001737 ram_addr_t phys_offset,
1738 bool log_dirty)
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001739{
1740 CPUPhysMemoryClient *client;
1741 QLIST_FOREACH(client, &memory_client_list, list) {
Michael S. Tsirkin0fd542f2011-04-06 22:25:38 +03001742 client->set_memory(client, start_addr, size, phys_offset, log_dirty);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001743 }
1744}
1745
1746static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001747 target_phys_addr_t end)
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001748{
1749 CPUPhysMemoryClient *client;
1750 QLIST_FOREACH(client, &memory_client_list, list) {
1751 int r = client->sync_dirty_bitmap(client, start, end);
1752 if (r < 0)
1753 return r;
1754 }
1755 return 0;
1756}
1757
1758static int cpu_notify_migration_log(int enable)
1759{
1760 CPUPhysMemoryClient *client;
1761 QLIST_FOREACH(client, &memory_client_list, list) {
1762 int r = client->migration_log(client, enable);
1763 if (r < 0)
1764 return r;
1765 }
1766 return 0;
1767}
1768
Alex Williamson2173a752011-05-03 12:36:58 -06001769struct last_map {
1770 target_phys_addr_t start_addr;
1771 ram_addr_t size;
1772 ram_addr_t phys_offset;
1773};
1774
Alex Williamson8d4c78e2011-05-03 12:36:46 -06001775/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
1776 * address. Each intermediate table provides the next L2_BITs of guest
1777  * physical address space. The number of levels varies based on host and
1778 * guest configuration, making it efficient to build the final guest
1779 * physical address by seeding the L1 offset and shifting and adding in
1780 * each L2 offset as we recurse through them. */
Alex Williamson2173a752011-05-03 12:36:58 -06001781static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
1782 void **lp, target_phys_addr_t addr,
1783 struct last_map *map)
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001784{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001785 int i;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001786
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001787 if (*lp == NULL) {
1788 return;
1789 }
1790 if (level == 0) {
1791 PhysPageDesc *pd = *lp;
Alex Williamson8d4c78e2011-05-03 12:36:46 -06001792 addr <<= L2_BITS + TARGET_PAGE_BITS;
Paul Brook7296aba2010-03-14 14:58:46 +00001793 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001794 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
Alex Williamson2173a752011-05-03 12:36:58 -06001795 target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;
1796
1797 if (map->size &&
1798 start_addr == map->start_addr + map->size &&
1799 pd[i].phys_offset == map->phys_offset + map->size) {
1800
1801 map->size += TARGET_PAGE_SIZE;
1802 continue;
1803 } else if (map->size) {
1804 client->set_memory(client, map->start_addr,
1805 map->size, map->phys_offset, false);
1806 }
1807
1808 map->start_addr = start_addr;
1809 map->size = TARGET_PAGE_SIZE;
1810 map->phys_offset = pd[i].phys_offset;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001811 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001812 }
1813 } else {
1814 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001815 for (i = 0; i < L2_SIZE; ++i) {
Alex Williamson8d4c78e2011-05-03 12:36:46 -06001816 phys_page_for_each_1(client, level - 1, pp + i,
Alex Williamson2173a752011-05-03 12:36:58 -06001817 (addr << L2_BITS) | i, map);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001818 }
1819 }
1820}
1821
1822static void phys_page_for_each(CPUPhysMemoryClient *client)
1823{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001824 int i;
Alex Williamson2173a752011-05-03 12:36:58 -06001825 struct last_map map = { };
1826
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001827 for (i = 0; i < P_L1_SIZE; ++i) {
1828 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
Alex Williamson2173a752011-05-03 12:36:58 -06001829 l1_phys_map + i, i, &map);
1830 }
1831 if (map.size) {
1832 client->set_memory(client, map.start_addr, map.size, map.phys_offset,
1833 false);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001834 }
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001835}
1836
1837void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1838{
1839 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1840 phys_page_for_each(client);
1841}
1842
1843void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1844{
1845 QLIST_REMOVE(client, list);
1846}
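
/* Registration sketch: a client fills in the callbacks invoked above and
 * is immediately replayed the current physical memory map through
 * phys_page_for_each(). The client itself is hypothetical; the field
 * names follow the uses in this file:
 *
 *     static CPUPhysMemoryClient my_client = {
 *         .set_memory        = my_set_memory,
 *         .sync_dirty_bitmap = my_sync_dirty_bitmap,
 *         .migration_log     = my_migration_log,
 *     };
 *     cpu_register_phys_memory_client(&my_client);
 */
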
1847#endif
1848
bellardf193c792004-03-21 17:06:25 +00001849static int cmp1(const char *s1, int n, const char *s2)
1850{
1851 if (strlen(s2) != n)
1852 return 0;
1853 return memcmp(s1, s2, n) == 0;
1854}
ths3b46e622007-09-17 08:09:54 +00001855
bellardf193c792004-03-21 17:06:25 +00001856/* takes a comma-separated list of log masks. Returns 0 on error. */
1857int cpu_str_to_log_mask(const char *str)
1858{
blueswir1c7cd6a32008-10-02 18:27:46 +00001859 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001860 int mask;
1861 const char *p, *p1;
1862
1863 p = str;
1864 mask = 0;
1865 for(;;) {
1866 p1 = strchr(p, ',');
1867 if (!p1)
1868 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001869 if(cmp1(p,p1-p,"all")) {
1870 for(item = cpu_log_items; item->mask != 0; item++) {
1871 mask |= item->mask;
1872 }
1873 } else {
1874 for(item = cpu_log_items; item->mask != 0; item++) {
1875 if (cmp1(p, p1 - p, item->name))
1876 goto found;
1877 }
1878 return 0;
bellardf193c792004-03-21 17:06:25 +00001879 }
bellardf193c792004-03-21 17:06:25 +00001880 found:
1881 mask |= item->mask;
1882 if (*p1 != ',')
1883 break;
1884 p = p1 + 1;
1885 }
1886 return mask;
1887}
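
/* Usage sketch: the command line's -d option is parsed roughly like
 *
 *     int mask = cpu_str_to_log_mask("in_asm,op_opt,int");
 *     if (!mask) {
 *         ... print the cpu_log_items[] table and exit ...
 *     }
 *     cpu_set_log(mask);
 */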
bellardea041c02003-06-25 16:16:50 +00001888
bellard75012672003-06-21 13:11:07 +00001889void cpu_abort(CPUState *env, const char *fmt, ...)
1890{
1891 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001892 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001893
1894 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001895 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001896 fprintf(stderr, "qemu: fatal: ");
1897 vfprintf(stderr, fmt, ap);
1898 fprintf(stderr, "\n");
1899#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001900 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1901#else
1902 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001903#endif
aliguori93fcfe32009-01-15 22:34:14 +00001904 if (qemu_log_enabled()) {
1905 qemu_log("qemu: fatal: ");
1906 qemu_log_vprintf(fmt, ap2);
1907 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001908#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001909 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001910#else
aliguori93fcfe32009-01-15 22:34:14 +00001911 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001912#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001913 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001914 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001915 }
pbrook493ae1f2007-11-23 16:53:59 +00001916 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001917 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001918#if defined(CONFIG_USER_ONLY)
1919 {
1920 struct sigaction act;
1921 sigfillset(&act.sa_mask);
1922 act.sa_handler = SIG_DFL;
1923 sigaction(SIGABRT, &act, NULL);
1924 }
1925#endif
bellard75012672003-06-21 13:11:07 +00001926 abort();
1927}
1928
thsc5be9f02007-02-28 20:20:53 +00001929CPUState *cpu_copy(CPUState *env)
1930{
ths01ba9812007-12-09 02:22:57 +00001931 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001932 CPUState *next_cpu = new_env->next_cpu;
1933 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001934#if defined(TARGET_HAS_ICE)
1935 CPUBreakpoint *bp;
1936 CPUWatchpoint *wp;
1937#endif
1938
thsc5be9f02007-02-28 20:20:53 +00001939 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001940
1941 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001942 new_env->next_cpu = next_cpu;
1943 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001944
1945 /* Clone all break/watchpoints.
1946 Note: Once we support ptrace with hw-debug register access, make sure
1947 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001948 QTAILQ_INIT(&env->breakpoints);
1949 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001950#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001951 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001952 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1953 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001954 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001955 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1956 wp->flags, NULL);
1957 }
1958#endif
1959
thsc5be9f02007-02-28 20:20:53 +00001960 return new_env;
1961}
1962
bellard01243112004-01-04 15:48:17 +00001963#if !defined(CONFIG_USER_ONLY)
1964
edgar_igl5c751e92008-05-06 08:44:21 +00001965static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1966{
1967 unsigned int i;
1968
1969 /* Discard jump cache entries for any tb which might potentially
1970 overlap the flushed page. */
1971 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1972 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001973 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001974
1975 i = tb_jmp_cache_hash_page(addr);
1976 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001977 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001978}
1979
Igor Kovalenko08738982009-07-12 02:15:40 +04001980static CPUTLBEntry s_cputlb_empty_entry = {
1981 .addr_read = -1,
1982 .addr_write = -1,
1983 .addr_code = -1,
1984 .addend = -1,
1985};
1986
bellardee8b7022004-02-03 23:35:10 +00001987/* NOTE: if flush_global is true, also flush global entries (not
1988 implemented yet) */
1989void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001990{
bellard33417e72003-08-10 21:47:01 +00001991 int i;
bellard01243112004-01-04 15:48:17 +00001992
bellard9fa3e852004-01-04 18:06:42 +00001993#if defined(DEBUG_TLB)
1994 printf("tlb_flush:\n");
1995#endif
bellard01243112004-01-04 15:48:17 +00001996 /* must reset current TB so that interrupts cannot modify the
1997 links while we are modifying them */
1998 env->current_tb = NULL;
1999
bellard33417e72003-08-10 21:47:01 +00002000 for(i = 0; i < CPU_TLB_SIZE; i++) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002001 int mmu_idx;
2002 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
Igor Kovalenko08738982009-07-12 02:15:40 +04002003 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002004 }
bellard33417e72003-08-10 21:47:01 +00002005 }
bellard9fa3e852004-01-04 18:06:42 +00002006
bellard8a40a182005-11-20 10:35:40 +00002007 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
bellard9fa3e852004-01-04 18:06:42 +00002008
Paul Brookd4c430a2010-03-17 02:14:28 +00002009 env->tlb_flush_addr = -1;
2010 env->tlb_flush_mask = 0;
bellarde3db7222005-01-26 22:00:47 +00002011 tlb_flush_count++;
bellard33417e72003-08-10 21:47:01 +00002012}
2013
bellard274da6b2004-05-20 21:56:27 +00002014static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
bellard61382a52003-10-27 21:22:23 +00002015{
ths5fafdf22007-09-16 21:08:06 +00002016 if (addr == (tlb_entry->addr_read &
bellard84b7b8e2005-11-28 21:19:04 +00002017 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00002018 addr == (tlb_entry->addr_write &
bellard84b7b8e2005-11-28 21:19:04 +00002019 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
ths5fafdf22007-09-16 21:08:06 +00002020 addr == (tlb_entry->addr_code &
bellard84b7b8e2005-11-28 21:19:04 +00002021 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
Igor Kovalenko08738982009-07-12 02:15:40 +04002022 *tlb_entry = s_cputlb_empty_entry;
bellard84b7b8e2005-11-28 21:19:04 +00002023 }
bellard61382a52003-10-27 21:22:23 +00002024}
2025
bellard2e126692004-04-25 21:28:44 +00002026void tlb_flush_page(CPUState *env, target_ulong addr)
bellard33417e72003-08-10 21:47:01 +00002027{
bellard8a40a182005-11-20 10:35:40 +00002028 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002029 int mmu_idx;
bellard01243112004-01-04 15:48:17 +00002030
bellard9fa3e852004-01-04 18:06:42 +00002031#if defined(DEBUG_TLB)
bellard108c49b2005-07-24 12:55:09 +00002032 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
bellard9fa3e852004-01-04 18:06:42 +00002033#endif
Paul Brookd4c430a2010-03-17 02:14:28 +00002034 /* Check if we need to flush due to large pages. */
2035 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
2036#if defined(DEBUG_TLB)
2037 printf("tlb_flush_page: forced full flush ("
2038 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2039 env->tlb_flush_addr, env->tlb_flush_mask);
2040#endif
2041 tlb_flush(env, 1);
2042 return;
2043 }
bellard01243112004-01-04 15:48:17 +00002044 /* must reset current TB so that interrupts cannot modify the
2045 links while we are modifying them */
2046 env->current_tb = NULL;
bellard33417e72003-08-10 21:47:01 +00002047
bellard61382a52003-10-27 21:22:23 +00002048 addr &= TARGET_PAGE_MASK;
bellard33417e72003-08-10 21:47:01 +00002049 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002050 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2051 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
bellard01243112004-01-04 15:48:17 +00002052
edgar_igl5c751e92008-05-06 08:44:21 +00002053 tlb_flush_jmp_cache(env, addr);
bellard9fa3e852004-01-04 18:06:42 +00002054}
2055
bellard9fa3e852004-01-04 18:06:42 +00002056/* update the TLBs so that writes to code in the virtual page 'addr'
2057 can be detected */
Anthony Liguoric227f092009-10-01 16:12:16 -05002058static void tlb_protect_code(ram_addr_t ram_addr)
bellard61382a52003-10-27 21:22:23 +00002059{
ths5fafdf22007-09-16 21:08:06 +00002060 cpu_physical_memory_reset_dirty(ram_addr,
bellard6a00d602005-11-21 23:25:50 +00002061 ram_addr + TARGET_PAGE_SIZE,
2062 CODE_DIRTY_FLAG);
bellard9fa3e852004-01-04 18:06:42 +00002063}
2064
bellard9fa3e852004-01-04 18:06:42 +00002065/* update the TLB so that writes in physical page 'phys_addr' are no longer
bellard3a7d9292005-08-21 09:26:42 +00002066   tested for self-modifying code */
Anthony Liguoric227f092009-10-01 16:12:16 -05002067static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +00002068 target_ulong vaddr)
bellard9fa3e852004-01-04 18:06:42 +00002069{
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002070 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
bellard1ccde1c2004-02-06 19:46:14 +00002071}
2072
ths5fafdf22007-09-16 21:08:06 +00002073static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
bellard1ccde1c2004-02-06 19:46:14 +00002074 unsigned long start, unsigned long length)
2075{
2076 unsigned long addr;
bellard84b7b8e2005-11-28 21:19:04 +00002077 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2078 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
bellard1ccde1c2004-02-06 19:46:14 +00002079 if ((addr - start) < length) {
pbrook0f459d12008-06-09 00:20:13 +00002080 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
bellard1ccde1c2004-02-06 19:46:14 +00002081 }
2082 }
2083}
2084
pbrook5579c7f2009-04-11 14:47:08 +00002085/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05002086void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00002087 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00002088{
2089 CPUState *env;
bellard4f2ac232004-04-26 19:44:02 +00002090 unsigned long length, start1;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002091 int i;
bellard1ccde1c2004-02-06 19:46:14 +00002092
2093 start &= TARGET_PAGE_MASK;
2094 end = TARGET_PAGE_ALIGN(end);
2095
2096 length = end - start;
2097 if (length == 0)
2098 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002099 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002100
bellard1ccde1c2004-02-06 19:46:14 +00002101 /* we modify the TLB cache so that the dirty bit will be set again
2102 when accessing the range */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002103 start1 = (unsigned long)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02002104 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00002105 address comparisons below. */
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002106 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00002107 != (end - 1) - start) {
2108 abort();
2109 }
2110
bellard6a00d602005-11-21 23:25:50 +00002111 for(env = first_cpu; env != NULL; env = env->next_cpu) {
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002112 int mmu_idx;
2113 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2114 for(i = 0; i < CPU_TLB_SIZE; i++)
2115 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2116 start1, length);
2117 }
bellard6a00d602005-11-21 23:25:50 +00002118 }
bellard1ccde1c2004-02-06 19:46:14 +00002119}
2120
aliguori74576192008-10-06 14:02:03 +00002121int cpu_physical_memory_set_dirty_tracking(int enable)
2122{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002123 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002124 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002125 ret = cpu_notify_migration_log(!!enable);
2126 return ret;
aliguori74576192008-10-06 14:02:03 +00002127}
2128
2129int cpu_physical_memory_get_dirty_tracking(void)
2130{
2131 return in_migration;
2132}
2133
Anthony Liguoric227f092009-10-01 16:12:16 -05002134int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2135 target_phys_addr_t end_addr)
aliguori2bec46d2008-11-24 20:21:41 +00002136{
Michael S. Tsirkin7b8f3b72010-01-27 22:07:21 +02002137 int ret;
Jan Kiszka151f7742009-05-01 20:52:47 +02002138
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002139 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
Jan Kiszka151f7742009-05-01 20:52:47 +02002140 return ret;
aliguori2bec46d2008-11-24 20:21:41 +00002141}
2142
Anthony PERARDe5896b12011-02-07 12:19:23 +01002143int cpu_physical_log_start(target_phys_addr_t start_addr,
2144 ram_addr_t size)
2145{
2146 CPUPhysMemoryClient *client;
2147 QLIST_FOREACH(client, &memory_client_list, list) {
2148 if (client->log_start) {
2149 int r = client->log_start(client, start_addr, size);
2150 if (r < 0) {
2151 return r;
2152 }
2153 }
2154 }
2155 return 0;
2156}
2157
2158int cpu_physical_log_stop(target_phys_addr_t start_addr,
2159 ram_addr_t size)
2160{
2161 CPUPhysMemoryClient *client;
2162 QLIST_FOREACH(client, &memory_client_list, list) {
2163 if (client->log_stop) {
2164 int r = client->log_stop(client, start_addr, size);
2165 if (r < 0) {
2166 return r;
2167 }
2168 }
2169 }
2170 return 0;
2171}
2172
bellard3a7d9292005-08-21 09:26:42 +00002173static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2174{
Anthony Liguoric227f092009-10-01 16:12:16 -05002175 ram_addr_t ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002176 void *p;
bellard3a7d9292005-08-21 09:26:42 +00002177
bellard84b7b8e2005-11-28 21:19:04 +00002178 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
pbrook5579c7f2009-04-11 14:47:08 +00002179 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2180 + tlb_entry->addend);
Marcelo Tosattie8902612010-10-11 15:31:19 -03002181 ram_addr = qemu_ram_addr_from_host_nofail(p);
bellard3a7d9292005-08-21 09:26:42 +00002182 if (!cpu_physical_memory_is_dirty(ram_addr)) {
pbrook0f459d12008-06-09 00:20:13 +00002183 tlb_entry->addr_write |= TLB_NOTDIRTY;
bellard3a7d9292005-08-21 09:26:42 +00002184 }
2185 }
2186}
2187
2188/* update the TLB according to the current state of the dirty bits */
2189void cpu_tlb_update_dirty(CPUState *env)
2190{
2191 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002192 int mmu_idx;
2193 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2194 for(i = 0; i < CPU_TLB_SIZE; i++)
2195 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2196 }
bellard3a7d9292005-08-21 09:26:42 +00002197}
2198
pbrook0f459d12008-06-09 00:20:13 +00002199static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002200{
pbrook0f459d12008-06-09 00:20:13 +00002201 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2202 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002203}
2204
pbrook0f459d12008-06-09 00:20:13 +00002205/* update the TLB corresponding to virtual page vaddr
2206 so that it is no longer dirty */
2207static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002208{
bellard1ccde1c2004-02-06 19:46:14 +00002209 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002210 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002211
pbrook0f459d12008-06-09 00:20:13 +00002212 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002213 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002214 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2215 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002216}
2217
Paul Brookd4c430a2010-03-17 02:14:28 +00002218/* Our TLB does not support large pages, so remember the area covered by
2219 large pages and trigger a full TLB flush if these are invalidated. */
2220static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2221 target_ulong size)
2222{
2223 target_ulong mask = ~(size - 1);
2224
2225 if (env->tlb_flush_addr == (target_ulong)-1) {
2226 env->tlb_flush_addr = vaddr & mask;
2227 env->tlb_flush_mask = mask;
2228 return;
2229 }
2230 /* Extend the existing region to include the new page.
2231 This is a compromise between unnecessary flushes and the cost
2232 of maintaining a full variable size TLB. */
2233 mask &= env->tlb_flush_mask;
2234 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2235 mask <<= 1;
2236 }
2237 env->tlb_flush_addr &= mask;
2238 env->tlb_flush_mask = mask;
2239}
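
/* Worked example (32-bit target_ulong shown): after a 2MB page at vaddr
 * 0x40000000, tlb_flush_addr is 0x40000000 and tlb_flush_mask is
 * 0xffe00000. Adding a second 2MB page at 0x40400000 shifts the mask
 * left until both addresses agree under it, leaving mask 0xff800000:
 * the tracked area becomes 0x40000000-0x407fffff, and any
 * tlb_flush_page() inside it triggers a full flush. */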
2240
2241/* Add a new TLB entry. At most one entry for a given virtual address
2242   is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2243   supplied size is only used by tlb_flush_page. */
2244void tlb_set_page(CPUState *env, target_ulong vaddr,
2245 target_phys_addr_t paddr, int prot,
2246 int mmu_idx, target_ulong size)
bellard9fa3e852004-01-04 18:06:42 +00002247{
bellard92e873b2004-05-21 14:52:29 +00002248 PhysPageDesc *p;
bellard4f2ac232004-04-26 19:44:02 +00002249 unsigned long pd;
bellard9fa3e852004-01-04 18:06:42 +00002250 unsigned int index;
bellard4f2ac232004-04-26 19:44:02 +00002251 target_ulong address;
pbrook0f459d12008-06-09 00:20:13 +00002252 target_ulong code_address;
Paul Brook355b1942010-04-05 00:28:53 +01002253 unsigned long addend;
bellard84b7b8e2005-11-28 21:19:04 +00002254 CPUTLBEntry *te;
aliguoria1d1bb32008-11-18 20:07:32 +00002255 CPUWatchpoint *wp;
Anthony Liguoric227f092009-10-01 16:12:16 -05002256 target_phys_addr_t iotlb;
bellard9fa3e852004-01-04 18:06:42 +00002257
Paul Brookd4c430a2010-03-17 02:14:28 +00002258 assert(size >= TARGET_PAGE_SIZE);
2259 if (size != TARGET_PAGE_SIZE) {
2260 tlb_add_large_page(env, vaddr, size);
2261 }
bellard92e873b2004-05-21 14:52:29 +00002262 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
bellard9fa3e852004-01-04 18:06:42 +00002263 if (!p) {
2264 pd = IO_MEM_UNASSIGNED;
bellard9fa3e852004-01-04 18:06:42 +00002265 } else {
2266 pd = p->phys_offset;
bellard9fa3e852004-01-04 18:06:42 +00002267 }
2268#if defined(DEBUG_TLB)
Stefan Weil7fd3f492010-09-30 22:39:51 +02002269 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2270 " prot=%x idx=%d pd=0x%08lx\n",
2271 vaddr, paddr, prot, mmu_idx, pd);
bellard9fa3e852004-01-04 18:06:42 +00002272#endif
2273
pbrook0f459d12008-06-09 00:20:13 +00002274 address = vaddr;
2275 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2276 /* IO memory case (romd handled later) */
2277 address |= TLB_MMIO;
2278 }
pbrook5579c7f2009-04-11 14:47:08 +00002279 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
pbrook0f459d12008-06-09 00:20:13 +00002280 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2281 /* Normal RAM. */
2282 iotlb = pd & TARGET_PAGE_MASK;
2283 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2284 iotlb |= IO_MEM_NOTDIRTY;
2285 else
2286 iotlb |= IO_MEM_ROM;
2287 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002288 /* IO handlers are currently passed a physical address.
pbrook0f459d12008-06-09 00:20:13 +00002289 It would be nice to pass an offset from the base address
2290 of that region. This would avoid having to special case RAM,
2291 and avoid full address decoding in every device.
2292 We can't use the high bits of pd for this because
2293 IO_MEM_ROMD uses these as a ram address. */
pbrook8da3ff12008-12-01 18:59:50 +00002294 iotlb = (pd & ~TARGET_PAGE_MASK);
2295 if (p) {
pbrook8da3ff12008-12-01 18:59:50 +00002296 iotlb += p->region_offset;
2297 } else {
2298 iotlb += paddr;
2299 }
pbrook0f459d12008-06-09 00:20:13 +00002300 }
pbrook6658ffb2007-03-16 23:58:11 +00002301
pbrook0f459d12008-06-09 00:20:13 +00002302 code_address = address;
2303 /* Make accesses to pages with watchpoints go via the
2304 watchpoint trap routines. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00002305 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00002306 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
Jun Koibf298f82010-05-06 14:36:59 +09002307 /* Avoid trapping reads of pages with a write breakpoint. */
2308 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2309 iotlb = io_mem_watch + paddr;
2310 address |= TLB_MMIO;
2311 break;
2312 }
pbrook6658ffb2007-03-16 23:58:11 +00002313 }
pbrook0f459d12008-06-09 00:20:13 +00002314 }
balrogd79acba2007-06-26 20:01:13 +00002315
pbrook0f459d12008-06-09 00:20:13 +00002316 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2317 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2318 te = &env->tlb_table[mmu_idx][index];
2319 te->addend = addend - vaddr;
2320 if (prot & PAGE_READ) {
2321 te->addr_read = address;
2322 } else {
2323 te->addr_read = -1;
2324 }
edgar_igl5c751e92008-05-06 08:44:21 +00002325
pbrook0f459d12008-06-09 00:20:13 +00002326 if (prot & PAGE_EXEC) {
2327 te->addr_code = code_address;
2328 } else {
2329 te->addr_code = -1;
2330 }
2331 if (prot & PAGE_WRITE) {
2332 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2333 (pd & IO_MEM_ROMD)) {
2334 /* Write access calls the I/O callback. */
2335 te->addr_write = address | TLB_MMIO;
2336 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2337 !cpu_physical_memory_is_dirty(pd)) {
2338 te->addr_write = address | TLB_NOTDIRTY;
bellard84b7b8e2005-11-28 21:19:04 +00002339 } else {
pbrook0f459d12008-06-09 00:20:13 +00002340 te->addr_write = address;
bellard9fa3e852004-01-04 18:06:42 +00002341 }
pbrook0f459d12008-06-09 00:20:13 +00002342 } else {
2343 te->addr_write = -1;
bellard9fa3e852004-01-04 18:06:42 +00002344 }
bellard9fa3e852004-01-04 18:06:42 +00002345}
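
/* Call-site sketch: target MMU fault handlers typically end with
 *
 *     tlb_set_page(env, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
 *                  prot, mmu_idx, page_size);
 *
 * where page_size is TARGET_PAGE_SIZE for ordinary pages and the real
 * (larger) size for guest large pages, so that the flush heuristics
 * above can see them. */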
2346
bellard01243112004-01-04 15:48:17 +00002347#else
2348
bellardee8b7022004-02-03 23:35:10 +00002349void tlb_flush(CPUState *env, int flush_global)
bellard01243112004-01-04 15:48:17 +00002350{
2351}
2352
bellard2e126692004-04-25 21:28:44 +00002353void tlb_flush_page(CPUState *env, target_ulong addr)
bellard01243112004-01-04 15:48:17 +00002354{
2355}
2356
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002357/*
2358 * Walks guest process memory "regions" one by one
2359 * and calls callback function 'fn' for each region.
2360 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002361
2362struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00002363{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002364 walk_memory_regions_fn fn;
2365 void *priv;
2366 unsigned long start;
2367 int prot;
2368};
bellard9fa3e852004-01-04 18:06:42 +00002369
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002370static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002371 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002372{
2373 if (data->start != -1ul) {
2374 int rc = data->fn(data->priv, data->start, end, data->prot);
2375 if (rc != 0) {
2376 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002377 }
bellard33417e72003-08-10 21:47:01 +00002378 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002379
2380 data->start = (new_prot ? end : -1ul);
2381 data->prot = new_prot;
2382
2383 return 0;
2384}
2385
2386static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002387 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002388{
Paul Brookb480d9b2010-03-12 23:23:29 +00002389 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002390 int i, rc;
2391
2392 if (*lp == NULL) {
2393 return walk_memory_regions_end(data, base, 0);
2394 }
2395
2396 if (level == 0) {
2397 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002398 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002399 int prot = pd[i].flags;
2400
2401 pa = base | (i << TARGET_PAGE_BITS);
2402 if (prot != data->prot) {
2403 rc = walk_memory_regions_end(data, pa, prot);
2404 if (rc != 0) {
2405 return rc;
2406 }
2407 }
2408 }
2409 } else {
2410 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00002411 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002412 pa = base | ((abi_ulong)i <<
2413 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002414 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2415 if (rc != 0) {
2416 return rc;
2417 }
2418 }
2419 }
2420
2421 return 0;
2422}
2423
2424int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2425{
2426 struct walk_memory_regions_data data;
2427 unsigned long i;
2428
2429 data.fn = fn;
2430 data.priv = priv;
2431 data.start = -1ul;
2432 data.prot = 0;
2433
2434 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002435 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002436 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2437 if (rc != 0) {
2438 return rc;
2439 }
2440 }
2441
2442 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002443}
2444
Paul Brookb480d9b2010-03-12 23:23:29 +00002445static int dump_region(void *priv, abi_ulong start,
2446 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002447{
2448 FILE *f = (FILE *)priv;
2449
Paul Brookb480d9b2010-03-12 23:23:29 +00002450 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2451 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002452 start, end, end - start,
2453 ((prot & PAGE_READ) ? 'r' : '-'),
2454 ((prot & PAGE_WRITE) ? 'w' : '-'),
2455 ((prot & PAGE_EXEC) ? 'x' : '-'));
2456
2457 return (0);
2458}
2459
2460/* dump memory mappings */
2461void page_dump(FILE *f)
2462{
2463 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2464 "start", "end", "size", "prot");
2465 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002466}
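
/* Illustrative output (addresses made up):
 *
 *     start    end      size     prot
 *     00400000-00498000 00098000 r-x
 *     00498000-004a0000 00008000 rw-
 */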
2467
pbrook53a59602006-03-25 19:31:22 +00002468int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002469{
bellard9fa3e852004-01-04 18:06:42 +00002470 PageDesc *p;
2471
2472 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002473 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002474 return 0;
2475 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002476}
2477
Richard Henderson376a7902010-03-10 15:57:04 -08002478/* Modify the flags of a page and invalidate the code if necessary.
2479   The flag PAGE_WRITE_ORG is set automatically depending
2480 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002481void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002482{
Richard Henderson376a7902010-03-10 15:57:04 -08002483 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002484
Richard Henderson376a7902010-03-10 15:57:04 -08002485 /* This function should never be called with addresses outside the
2486 guest address space. If this assert fires, it probably indicates
2487 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002488#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2489 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002490#endif
2491 assert(start < end);
2492
bellard9fa3e852004-01-04 18:06:42 +00002493 start = start & TARGET_PAGE_MASK;
2494 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002495
2496 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002497 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002498 }
2499
2500 for (addr = start, len = end - start;
2501 len != 0;
2502 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2503 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2504
2505 /* If the write protection bit is set, then we invalidate
2506 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002507 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002508 (flags & PAGE_WRITE) &&
2509 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002510 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002511 }
2512 p->flags = flags;
2513 }
bellard9fa3e852004-01-04 18:06:42 +00002514}
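
/* Usage sketch: the linux-user mmap path marks a fresh mapping valid
 * roughly like
 *
 *     page_set_flags(start, start + len, prot | PAGE_VALID);
 *
 * where prot carries the PAGE_READ/PAGE_WRITE/PAGE_EXEC bits;
 * PAGE_WRITE_ORG is added by this function when PAGE_WRITE is set. */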
2515
ths3d97b402007-11-02 19:02:07 +00002516int page_check_range(target_ulong start, target_ulong len, int flags)
2517{
2518 PageDesc *p;
2519 target_ulong end;
2520 target_ulong addr;
2521
Richard Henderson376a7902010-03-10 15:57:04 -08002522 /* This function should never be called with addresses outside the
2523 guest address space. If this assert fires, it probably indicates
2524 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002525#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2526 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002527#endif
2528
Richard Henderson3e0650a2010-03-29 10:54:42 -07002529 if (len == 0) {
2530 return 0;
2531 }
Richard Henderson376a7902010-03-10 15:57:04 -08002532 if (start + len - 1 < start) {
2533 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002534 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002535 }
balrog55f280c2008-10-28 10:24:11 +00002536
ths3d97b402007-11-02 19:02:07 +00002537    end = TARGET_PAGE_ALIGN(start + len); /* must do this before we lose bits in the next step */
2538 start = start & TARGET_PAGE_MASK;
2539
Richard Henderson376a7902010-03-10 15:57:04 -08002540 for (addr = start, len = end - start;
2541 len != 0;
2542 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002543 p = page_find(addr >> TARGET_PAGE_BITS);
2544 if( !p )
2545 return -1;
2546 if( !(p->flags & PAGE_VALID) )
2547 return -1;
2548
bellarddae32702007-11-14 10:51:00 +00002549 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002550 return -1;
bellarddae32702007-11-14 10:51:00 +00002551 if (flags & PAGE_WRITE) {
2552 if (!(p->flags & PAGE_WRITE_ORG))
2553 return -1;
2554             /* unprotect the page if it was made read-only because it
2555 contains translated code */
2556 if (!(p->flags & PAGE_WRITE)) {
2557 if (!page_unprotect(addr, 0, NULL))
2558 return -1;
2559 }
2560 return 0;
2561 }
ths3d97b402007-11-02 19:02:07 +00002562 }
2563 return 0;
2564}
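
/* Usage sketch: linux-user syscall code validates guest buffers with a
 * wrapper along the lines of
 *
 *     if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0)
 *         return -TARGET_EFAULT;
 *
 * which, for writes, also transparently unprotects pages made read-only
 * because they contain translated code. */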

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init(target_phys_addr_t base, ram_addr_t *phys,
                               ram_addr_t orig_memory,
                               ram_addr_t region_offset);

#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                       \
    do {                                                                  \
        if (addr > start_addr)                                            \
            start_addr2 = 0;                                              \
        else {                                                            \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;                 \
            if (start_addr2 > 0)                                          \
                need_subpage = 1;                                         \
        }                                                                 \
                                                                          \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)          \
            end_addr2 = TARGET_PAGE_SIZE - 1;                             \
        else {                                                            \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                         \
                need_subpage = 1;                                         \
        }                                                                 \
    } while (0)
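
/* Worked example (added commentary, assuming 4 KiB target pages):
   registering a region with start_addr = 0x1080 and orig_size = 0x100,
   evaluated for the page at addr = 0x1000, yields start_addr2 = 0x080
   and end_addr2 = 0x17f.  Since the region neither starts at the page
   boundary nor reaches the page end, need_subpage is set and offsets
   0x080..0x17f of that page go through the subpage machinery below. */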

/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    assert(size);
    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD)) {
                    phys_offset += TARGET_PAGE_SIZE;
                }
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
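
/* Illustrative sketch (an assumption, not code from this file): board
   code of this era normally reaches this function through the
   cpu_register_physical_memory() wrapper, e.g.

       ram_addr_t ram = qemu_ram_alloc(NULL, "board.ram", ram_size);
       cpu_register_physical_memory(0, ram_size, ram | IO_MEM_RAM);

   which maps ram_size bytes of RAM at guest physical address 0;
   "board.ram" and ram_size are placeholders. */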

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return IO_MEM_UNASSIGNED;
    }
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled()) {
        kvm_coalesce_mmio_region(addr, size);
    }
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled()) {
        kvm_uncoalesce_mmio_region(addr, size);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
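
/* Example (added commentary): with existing blocks at [0, 0x100000) and
   [0x300000, 0x400000), a request for size 0x100000 sees the gaps
   [0x100000, 0x300000) and [0x400000, ...); the first is the smallest
   gap that still fits, so the new block lands at offset 0x100000.
   Choosing the smallest sufficient gap keeps the ram_addr_t space
   compact as blocks are freed and reallocated. */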

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->length);
    }

    return last;
}

ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller
               than a system-defined value, which is at least 256GB.
               Larger systems have larger values.  We put the guest between
               the end of the data segment (system break) and this value.
               We use 32GB as a base to have enough room for the system
               break to grow. */
            new_block->host = mmap((void *)0x800000000, size,
                                   PROT_EXEC | PROT_READ | PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED,
                                   -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled()) {
        kvm_setup_guest_memory(new_block->host, size);
    }

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
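
/* Illustrative sketch (hypothetical device state 's', not from this
   file): a display adapter of this era allocates its video memory as

       s->vram_offset = qemu_ram_alloc(NULL, "vga.vram", vga_ram_size);
       uint8_t *vram = qemu_get_ram_ptr(s->vram_offset);

   The return value is a ram_addr_t offset into the global RAM space,
   not a host pointer; qemu_get_ram_ptr() below performs that
   translation. */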

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length,
                                PROT_EXEC | PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list. */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
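
/* Design note (added commentary): the move-to-front above turns the
   linear QLIST scan into an MRU cache, so repeated lookups that hit the
   same RAMBlock (typically the large main-memory block) stay cheap even
   though the worst case remains a full list walk. */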

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram.  Similar to qemu_get_ram_ptr
 * but takes a size argument. */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length) {
                    *size = block->length - addr + block->offset;
                }
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case occurs when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset. */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
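
/* Usage note (added commentary): the softmmu slow path typically holds a
   host pointer taken from a TLB entry and recovers the ram_addr_t with

       ram_addr_t raddr = qemu_ram_addr_from_host_nofail(host_ptr);

   The _nofail variant aborts on pointers that belong to no RAMBlock;
   callers that may see such pointers should use
   qemu_ram_addr_from_host() and check its return value instead. */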

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
    }
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
    }
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
    }
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
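
/* Commentary (added): the notdirty handlers above implement write
   tracking for RAM pages that contain translated code.  A store first
   invalidates any TBs on the page (tb_invalidate_phys_page_fast), then
   performs the access through qemu_get_ram_ptr(), and finally raises all
   dirty bits except CODE_DIRTY_FLAG.  Only once the code flush has left
   dirty_flags at 0xff is the TLB entry switched back to a plain RAM
   mapping via tlb_set_dirty(), so later stores stop trapping here. */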

/* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
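
/* Note (added): len_mask passed in by the callers below is the
   complement of (access size - 1): ~0x0 for byte, ~0x1 for 16-bit and
   ~0x3 for 32-bit accesses.  For aligned accesses the two tests above
   cover both directions of overlap: the watchpoint address falling
   inside the access, or the access falling inside the watchpoint's
   watched range (wp->len_mask). */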

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};

static inline uint32_t subpage_readlen(subpage_t *mmio,
                                       target_phys_addr_t addr,
                                       unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

static inline void subpage_writelen(subpage_t *mmio, target_phys_addr_t addr,
                                    uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}

static uint32_t subpage_readb(void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb(void *opaque, target_phys_addr_t addr,
                           uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw(void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew(void *opaque, target_phys_addr_t addr,
                           uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl(void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel(void *opaque, target_phys_addr_t addr,
                           uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static uint32_t subpage_ram_readb(void *opaque, target_phys_addr_t addr)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    return ldub_p(ptr);
}

static void subpage_ram_writeb(void *opaque, target_phys_addr_t addr,
                               uint32_t value)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    stb_p(ptr, value);
}

static uint32_t subpage_ram_readw(void *opaque, target_phys_addr_t addr)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    return lduw_p(ptr);
}

static void subpage_ram_writew(void *opaque, target_phys_addr_t addr,
                               uint32_t value)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    stw_p(ptr, value);
}

static uint32_t subpage_ram_readl(void *opaque, target_phys_addr_t addr)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    return ldl_p(ptr);
}

static void subpage_ram_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t value)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    stl_p(ptr, value);
}

static CPUReadMemoryFunc * const subpage_ram_read[] = {
    &subpage_ram_readb,
    &subpage_ram_readw,
    &subpage_ram_readl,
};

static CPUWriteMemoryFunc * const subpage_ram_write[] = {
    &subpage_ram_writeb,
    &subpage_ram_writew,
    &subpage_ram_writel,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        memory = IO_MEM_SUBPAGE_RAM;
    }
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}
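
/* Illustrative example (hypothetical values): after subpage_init() below
   has seeded a page, a caller can remap the first 256 bytes of it to a
   device previously registered with cpu_register_io_memory():

       subpage_register(mmio, 0x000, 0x0ff, dev_io_index, 0);

   Offsets 0x000..0x0ff of the page then dispatch to that device's
   handlers while the rest of the page keeps its original mapping. */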

static subpage_t *subpage_init(target_phys_addr_t base, ram_addr_t *phys,
                               ram_addr_t orig_memory,
                               ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}

static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++) {
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}

/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 *
 * CPU          Device           swap?
 *
 * little       little           no
 * little       big              yes
 * big          little           yes
 * big          big              no
 */
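
/* Example (added commentary): a little-endian device model plugged into
   a big-endian target falls in a "swap" row of the table above; the
   16- and 32-bit wrappers below byte-swap values in both directions, so
   a 32-bit value of 0x12345678 passing through arrives on the other
   side as 0x78563412. */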

typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;

static uint32_t swapendian_mem_readb(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = c->read[0](c->opaque, addr);
    return val;
}

static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap16(c->read[1](c->opaque, addr));
    return val;
}

static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap32(c->read[2](c->opaque, addr));
    return val;
}

static CPUReadMemoryFunc * const swapendian_readfn[3] = {
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};

static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[0](c->opaque, addr, val);
}

static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[1](c->opaque, addr, bswap16(val));
}

static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[2](c->opaque, addr, bswap32(val));
}

static CPUWriteMemoryFunc * const swapendian_writefn[3] = {
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};

static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = g_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    io_mem_opaque[io_index] = c;
}

static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        g_free(io_mem_opaque[io_index]);
    }
}

/* mem_read and mem_write are arrays of the functions used to access a
   byte (index 0), word (index 1) and dword (index 2); an entry may be
   omitted by passing a NULL function pointer.
   If io_index is non-zero, the corresponding I/O zone is modified; if it
   is zero, a new I/O zone is allocated. The return value can be used
   with cpu_register_physical_memory(); -1 is returned on error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}
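
/* Illustrative sketch, not part of the original file: how a device model
   might use cpu_register_io_memory().  MyDevState and the mydev_* names
   are hypothetical.  NULL entries in the callback tables fall back to the
   unassigned_mem_* handlers, and a non-native endian argument makes the
   core interpose the swapendian wrappers above. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    MyDevState *s = opaque;
    return s->regs[addr >> 2];
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    MyDevState *s = opaque;
    s->regs[addr >> 2] = val;
}

static CPUReadMemoryFunc * const mydev_read[3] = {
    NULL, NULL, mydev_readl,    /* byte/word accesses left unassigned */
};
static CPUWriteMemoryFunc * const mydev_write[3] = {
    NULL, NULL, mydev_writel,
};

static void mydev_init_mmio(MyDevState *s)
{
    int io_index = cpu_register_io_memory(mydev_read, mydev_write, s,
                                          DEVICE_LITTLE_ENDIAN);
    /* io_index is -1 on failure; otherwise it is handed to
       cpu_register_physical_memory() to map the region. */
}
#endif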

void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    swapendian_del(io_index);

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read,
                                 subpage_ram_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}
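
/* Illustrative sketch, not part of the original file: board code builds
   the guest address space by attaching subregions to the region returned
   by get_system_memory().  memory_region_add_subregion() is assumed from
   the memory API in memory.h; my_ram is a placeholder for a region
   initialized elsewhere. */
#if 0
static void my_board_map_ram(MemoryRegion *my_ram)
{
    MemoryRegion *sysmem = get_system_memory();

    /* Map the already-initialized RAM region at guest physical 0. */
    memory_region_add_subregion(sysmem, 0, my_ram);
}
#endif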

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                ram_addr_t addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

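/* Illustrative sketch, not part of the original file: most callers go
   through the cpu_physical_memory_read()/write() wrappers, which are
   cpu_physical_memory_rw() with is_write fixed to 0 or 1.  The helper
   names and the gpa parameter below are hypothetical. */
#if 0
static void copy_from_guest(void *dst, target_phys_addr_t gpa, int len)
{
    cpu_physical_memory_read(gpa, dst, len);        /* is_write == 0 */
}

static void copy_to_guest(target_phys_addr_t gpa, void *src, int len)
{
    cpu_physical_memory_write(gpa, src, len);       /* is_write == 1 */
}
#endif
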
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

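/* Illustrative sketch, not part of the original file: firmware loaders
   use cpu_physical_memory_write_rom() because cpu_physical_memory_rw()
   deliberately ignores writes to ROM.  ROM_BASE and the blob arguments
   are placeholders. */
#if 0
static void load_firmware_blob(const uint8_t *blob, int blob_size)
{
    cpu_physical_memory_write_rom(ROM_BASE, blob, blob_size);
}
#endif
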
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
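
/* Illustrative sketch, not part of the original file: the intended
   zero-copy pattern for the two functions above.  When the target is
   MMIO, only the single bounce buffer is available, so the map can fail;
   cpu_register_map_client() provides the retry hook.  do_guest_read and
   retry_cb are hypothetical names. */
#if 0
static void do_guest_read(target_phys_addr_t gpa, target_phys_addr_t len,
                          void (*retry_cb)(void *opaque), void *opaque)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(gpa, &plen, 0);

    if (!host) {
        /* Bounce buffer busy: ask to be notified, then retry later. */
        cpu_register_map_client(opaque, retry_cb);
        return;
    }
    /* ... consume plen bytes at host (may be less than len) ... */
    cpu_physical_memory_unmap(host, plen, 0, plen);
}
#endif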

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

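/* Illustrative sketch, not part of the original file: the _le/_be
   variants state the device's byte order explicitly instead of relying
   on the target's native order, so the same device model works on both
   big- and little-endian targets.  DEV_REG_BASE is a placeholder. */
#if 0
static uint32_t read_le_device_reg(target_phys_addr_t offset)
{
    /* A little-endian register reads back identically on any target. */
    return ldl_le_phys(DEV_REG_BASE + offset);
}
#endif
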
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

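/* Illustrative sketch, not part of the original file: a typical user of
   the _notdirty stores is a target MMU helper updating accessed/dirty
   bits in a guest page table entry, where the write must not itself be
   tracked as a guest RAM modification.  PTE_ACCESSED and pte_addr are
   placeholders for a hypothetical target. */
#if 0
static void set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
    }
}
#endif
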
/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
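
/* Illustrative sketch, not part of the original file: the gdb stub reads
   and writes guest memory through cpu_memory_rw_debug(), which resolves
   guest virtual addresses via cpu_get_phys_page_debug() and therefore
   works without a TLB entry.  debugger_peek is a hypothetical name. */
#if 0
static int debugger_peek(CPUState *env, target_ulong vaddr,
                         uint8_t *out, int len)
{
    return cpu_memory_rw_debug(env, vaddr, out, len, 0);
}
#endif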

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif