/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                            \
    __attribute__((__section__(".gen_code")))       \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                            \
    __attribute__((aligned (16)))
#else
#define code_gen_section                            \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread.  It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
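
/* Worked example (illustrative values, not a build guarantee): with
   L1_MAP_ADDR_SPACE_BITS == 32 and 4 KiB target pages
   (TARGET_PAGE_BITS == 12), a page index has 32 - 12 = 20 bits.
   V_L1_BITS_REM = 20 % 10 = 0, which is < 4, so V_L1_BITS = 10:
   the top 10 bits index the 1024-entry l1_map and the low 10 bits
   index a single bottom level of PageDesc entries. */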

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
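/* PHYS_MAP_NODE_NIL evaluates to 0x7fff, the largest value the 15-bit
   'ptr' field can hold; it is reserved to mean "no node allocated". */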

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

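/* map_exec() marks the host pages covering [addr, addr + size) as
   executable.  The POSIX variant rounds 'start' down and 'end' up to
   host page boundaries before calling mprotect(), since protections
   can only be changed with page granularity. */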
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

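/* Walk (and optionally populate) the l1_map radix tree for guest page
   'index': the top V_L1_BITS of the index select an l1_map slot, each
   intermediate level consumes L2_BITS more bits, and the bottom level
   holds arrays of L2_SIZE PageDesc entries.  With alloc == 0 the walk
   returns NULL at the first missing node instead of allocating it. */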
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

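/* Recursively record section index 'leaf' for the physical page range
   [*index, *index + *nb) in the radix tree.  At each level an entry
   covers 'step' pages; a subrange that starts step-aligned and spans at
   least one full step is stored as a leaf at this level, while anything
   smaller is delegated entirely to the next level down. */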
static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change when a dedicated libc is used.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

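/* Allocate the buffer that translated host code is generated into.
   In the static case this is a BSS array; otherwise it is mmap()ed
   RWX, with per-architecture placement and size caps so that generated
   calls and branches can reach the whole buffer directly (e.g. below
   2 GB on 64-bit Sparc, at most 16 MB on ARM). */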
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc__) && HOST_LONG_BITS == 64
        /* Map the buffer below 2G, so we can use direct calls and branches */
        start = (void *) 0x40000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc__) && HOST_LONG_BITS == 64
        /* Map the buffer below 2G, so we can use direct calls and branches */
        addr = (void *) 0x40000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer.  Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

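/* Register a new CPU: append 'env' to the global first_cpu list, assign
   it the next free cpu_index, and (in system mode) register it with
   vmstate/savevm so its common state is migrated. */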
void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block.  Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

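/* TBs on a page's 'first_tb'/'page_next' list are stored with the low
   two bits of the pointer used as a tag: the tag is the index (0 or 1)
   of the page slot within the TB, since a TB spanning two pages sits on
   two lists at once.  Hence the recurring (uintptr_t)tb & 3 /
   (uintptr_t)tb & ~3 pattern below. */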
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

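/* Each TB's two outgoing jump slots form circular lists of the TBs that
   jump to a given target: jmp_next[n] pointers are tagged with the slot
   number in their low bits, and a tag value of 2 marks the list head
   (the target TB itself, stored in jmp_first as tb | 2). */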
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

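/* Remove a TB from every structure that can reach it: the physical-PC
   hash table, the per-page TB lists, each CPU's tb_jmp_cache, and the
   jump lists (any TB that chained directly to this one has its jump
   reset so it falls back to the normal TB lookup path). */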
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

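/* Set bits [start, start + len) in the byte-array bitmap 'tab' (bit k
   lives in byte k >> 3 at position k & 7).  For example, set_bits(tab,
   3, 7) sets bits 3..7 of tab[0] and bits 0..1 of tab[1]. */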
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

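/* Build a bitmap of which bytes of the page are covered by translated
   code, by walking the page's TB list and marking each TB's extent.
   tb_invalidate_phys_page_fast() consults this bitmap so that guest
   writes which miss all translated code can skip invalidation. */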
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

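/* Translate the guest code at 'pc' into a new TB: allocate a TB
   (flushing the whole cache if the buffer is full, after which a second
   tb_alloc() cannot fail), run the TCG translator into code_gen_ptr,
   then link the TB into the page tables for the one or two physical
   pages it covers. */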
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
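/* Fast path for small guest writes: if the page has a code bitmap and
   none of the 'len' bits at 'start' are set, the write cannot touch
   translated code and no invalidation is needed. */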
Paul Brook41c1b1c2010-03-12 16:54:58 +00001196static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
bellard9fa3e852004-01-04 18:06:42 +00001197{
1198 PageDesc *p;
1199 int offset, b;
bellard59817cc2004-02-16 22:01:13 +00001200#if 0
bellarda4193c82004-06-03 14:01:43 +00001201 if (1) {
aliguori93fcfe32009-01-15 22:34:14 +00001202 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1203 cpu_single_env->mem_io_vaddr, len,
1204 cpu_single_env->eip,
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001205 cpu_single_env->eip +
1206 (intptr_t)cpu_single_env->segs[R_CS].base);
bellard59817cc2004-02-16 22:01:13 +00001207 }
1208#endif
bellard9fa3e852004-01-04 18:06:42 +00001209 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001210 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001211 return;
1212 if (p->code_bitmap) {
1213 offset = start & ~TARGET_PAGE_MASK;
1214 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1215 if (b & ((1 << len) - 1))
1216 goto do_invalidate;
1217 } else {
1218 do_invalidate:
bellardd720b932004-04-25 17:57:43 +00001219 tb_invalidate_phys_page_range(start, start + len, 1);
bellard9fa3e852004-01-04 18:06:42 +00001220 }
1221}
1222
bellard9fa3e852004-01-04 18:06:42 +00001223#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001224static void tb_invalidate_phys_page(tb_page_addr_t addr,
Blue Swirl20503962012-04-09 14:20:20 +00001225 uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001226{
aliguori6b917542008-11-18 19:46:41 +00001227 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001228 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001229 int n;
bellardd720b932004-04-25 17:57:43 +00001230#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001231 TranslationBlock *current_tb = NULL;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001232 CPUArchState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001233 int current_tb_modified = 0;
1234 target_ulong current_pc = 0;
1235 target_ulong current_cs_base = 0;
1236 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001237#endif
bellard9fa3e852004-01-04 18:06:42 +00001238
1239 addr &= TARGET_PAGE_MASK;
1240 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001241 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001242 return;
1243 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001244#ifdef TARGET_HAS_PRECISE_SMC
1245 if (tb && pc != 0) {
1246 current_tb = tb_find_pc(pc);
1247 }
1248#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001249 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001250 n = (uintptr_t)tb & 3;
1251 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001252#ifdef TARGET_HAS_PRECISE_SMC
1253 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001254 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001255 /* If we are modifying the current TB, we must stop
1256 its execution. We could be more precise by checking
1257 that the modification is after the current PC, but it
1258 would require a specialized function to partially
1259 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001260
bellardd720b932004-04-25 17:57:43 +00001261 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001262 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001263 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1264 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001265 }
1266#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001267 tb_phys_invalidate(tb, addr);
1268 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001269 }
1270 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001271#ifdef TARGET_HAS_PRECISE_SMC
1272 if (current_tb_modified) {
1273        /* we generate a block containing just the instruction that
1274           modified the memory; this ensures the block cannot modify
1275           itself */
bellardea1c1802004-06-14 18:56:36 +00001276 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001277 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001278 cpu_resume_from_signal(env, puc);
1279 }
1280#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001281}
bellard9fa3e852004-01-04 18:06:42 +00001282#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001283
1284/* add the tb to the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001285static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001286 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001287{
1288 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001289#ifndef CONFIG_USER_ONLY
1290 bool page_already_protected;
1291#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001292
bellard9fa3e852004-01-04 18:06:42 +00001293 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001294 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001295 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001296#ifndef CONFIG_USER_ONLY
1297 page_already_protected = p->first_tb != NULL;
1298#endif
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001299 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
bellard9fa3e852004-01-04 18:06:42 +00001300 invalidate_page_bitmap(p);
1301
bellard107db442004-06-22 18:48:46 +00001302#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001303
bellard9fa3e852004-01-04 18:06:42 +00001304#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001305 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001306 target_ulong addr;
1307 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001308 int prot;
1309
bellardfd6ce8f2003-05-14 19:00:11 +00001310        /* force the host page as non-writable (writes will have a
1311 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001312 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001313 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001314 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1315 addr += TARGET_PAGE_SIZE) {
1316
1317 p2 = page_find (addr >> TARGET_PAGE_BITS);
1318 if (!p2)
1319 continue;
1320 prot |= p2->flags;
1321 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001322 }
ths5fafdf22007-09-16 21:08:06 +00001323 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001324 (prot & PAGE_BITS) & ~PAGE_WRITE);
1325#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001326 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001327 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001328#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001329 }
bellard9fa3e852004-01-04 18:06:42 +00001330#else
1331    /* if some code is already present, then the page is already
1332       protected, so we only need to act when the first TB is
1333       allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001334 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001335 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001336 }
1337#endif
bellardd720b932004-04-25 17:57:43 +00001338
1339#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001340}
1341
bellard9fa3e852004-01-04 18:06:42 +00001342/* add a new TB and link it to the physical page tables. phys_page2 is
1343 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001344void tb_link_page(TranslationBlock *tb,
1345 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001346{
bellard9fa3e852004-01-04 18:06:42 +00001347 unsigned int h;
1348 TranslationBlock **ptb;
1349
pbrookc8a706f2008-06-02 16:16:42 +00001350 /* Grab the mmap lock to stop another thread invalidating this TB
1351 before we are done. */
1352 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001353 /* add in the physical hash table */
1354 h = tb_phys_hash_func(phys_pc);
1355 ptb = &tb_phys_hash[h];
1356 tb->phys_hash_next = *ptb;
1357 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001358
1359 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001360 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1361 if (phys_page2 != -1)
1362 tb_alloc_page(tb, 1, phys_page2);
1363 else
1364 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001365
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001366 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
bellardd4e81642003-05-25 16:46:15 +00001367 tb->jmp_next[0] = NULL;
1368 tb->jmp_next[1] = NULL;
1369
1370 /* init original jump addresses */
1371 if (tb->tb_next_offset[0] != 0xffff)
1372 tb_reset_jump(tb, 0);
1373 if (tb->tb_next_offset[1] != 0xffff)
1374 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001375
1376#ifdef DEBUG_TB_CHECK
1377 tb_page_check();
1378#endif
pbrookc8a706f2008-06-02 16:16:42 +00001379 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001380}
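
/* Illustrative sketch (not from the original file): the tagged-pointer
   convention used by tb->page_next[] and the jmp lists linked above.
   The low two bits of each pointer hold the page slot (0 or 1), or 2
   for the jmp_first list sentinel. */
#if 0
static TranslationBlock *example_untag_tb(TranslationBlock *tagged, int *n)
{
    *n = (uintptr_t)tagged & 3;   /* slot index, or 2 for the sentinel */
    return (TranslationBlock *)((uintptr_t)tagged & ~3);
}
#endif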
1381
bellarda513fe12003-05-27 23:29:48 +00001382/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1383 tb[1].tc_ptr. Return NULL if not found */
Stefan Weil6375e092012-04-06 22:26:15 +02001384TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
bellarda513fe12003-05-27 23:29:48 +00001385{
1386 int m_min, m_max, m;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001387 uintptr_t v;
bellarda513fe12003-05-27 23:29:48 +00001388 TranslationBlock *tb;
1389
1390 if (nb_tbs <= 0)
1391 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001392 if (tc_ptr < (uintptr_t)code_gen_buffer ||
1393 tc_ptr >= (uintptr_t)code_gen_ptr) {
bellarda513fe12003-05-27 23:29:48 +00001394 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001395 }
bellarda513fe12003-05-27 23:29:48 +00001396 /* binary search (cf Knuth) */
1397 m_min = 0;
1398 m_max = nb_tbs - 1;
1399 while (m_min <= m_max) {
1400 m = (m_min + m_max) >> 1;
1401 tb = &tbs[m];
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001402 v = (uintptr_t)tb->tc_ptr;
bellarda513fe12003-05-27 23:29:48 +00001403 if (v == tc_ptr)
1404 return tb;
1405 else if (tc_ptr < v) {
1406 m_max = m - 1;
1407 } else {
1408 m_min = m + 1;
1409 }
ths5fafdf22007-09-16 21:08:06 +00001410 }
bellarda513fe12003-05-27 23:29:48 +00001411 return &tbs[m_max];
1412}
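
/* Illustrative usage sketch (hypothetical caller, not from the original
   file): mapping a faulting host PC back to its TB, as cpu_restore_state()
   does when precise guest state is needed inside a signal handler. */
#if 0
static void example_recover_guest_state(CPUArchState *env, uintptr_t host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);
    if (tb) {
        /* host_pc lies within tb's generated code */
        cpu_restore_state(tb, env, host_pc);
    }
}
#endif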
bellard75012672003-06-21 13:11:07 +00001413
bellardea041c02003-06-25 16:16:50 +00001414static void tb_reset_jump_recursive(TranslationBlock *tb);
1415
1416static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1417{
1418 TranslationBlock *tb1, *tb_next, **ptb;
1419 unsigned int n1;
1420
1421 tb1 = tb->jmp_next[n];
1422 if (tb1 != NULL) {
1423 /* find head of list */
1424 for(;;) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001425 n1 = (uintptr_t)tb1 & 3;
1426 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001427 if (n1 == 2)
1428 break;
1429 tb1 = tb1->jmp_next[n1];
1430 }
1431        /* we are now sure that tb jumps to tb1 */
1432 tb_next = tb1;
1433
1434 /* remove tb from the jmp_first list */
1435 ptb = &tb_next->jmp_first;
1436 for(;;) {
1437 tb1 = *ptb;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001438 n1 = (uintptr_t)tb1 & 3;
1439 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001440 if (n1 == n && tb1 == tb)
1441 break;
1442 ptb = &tb1->jmp_next[n1];
1443 }
1444 *ptb = tb->jmp_next[n];
1445 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001446
bellardea041c02003-06-25 16:16:50 +00001447 /* suppress the jump to next tb in generated code */
1448 tb_reset_jump(tb, n);
1449
bellard01243112004-01-04 15:48:17 +00001450        /* suppress jumps in the tb to which we could have jumped */
bellardea041c02003-06-25 16:16:50 +00001451 tb_reset_jump_recursive(tb_next);
1452 }
1453}
1454
1455static void tb_reset_jump_recursive(TranslationBlock *tb)
1456{
1457 tb_reset_jump_recursive2(tb, 0);
1458 tb_reset_jump_recursive2(tb, 1);
1459}
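
/* Illustrative sketch (not from the original file): how the loops above
   locate the head of the circular jmp list.  Entries are tagged pointers;
   tag value 2 marks the owning TB's jmp_first field. */
#if 0
static TranslationBlock *example_find_jmp_list_owner(TranslationBlock *tb1)
{
    unsigned int n1;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (n1 == 2) {
            return tb1;     /* reached jmp_first: tb1 owns the list */
        }
        tb1 = tb1->jmp_next[n1];
    }
}
#endif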
1460
bellard1fddef42005-04-17 19:16:13 +00001461#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001462#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001463static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
Paul Brook94df27f2010-02-28 23:47:45 +00001464{
1465 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1466}
1467#else
Max Filippov1e7855a2012-04-10 02:48:17 +04001468void tb_invalidate_phys_addr(target_phys_addr_t addr)
bellardd720b932004-04-25 17:57:43 +00001469{
Anthony Liguoric227f092009-10-01 16:12:16 -05001470 ram_addr_t ram_addr;
Avi Kivityf3705d52012-03-08 16:16:34 +02001471 MemoryRegionSection *section;
bellardd720b932004-04-25 17:57:43 +00001472
Avi Kivity06ef3522012-02-13 16:11:22 +02001473 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf3705d52012-03-08 16:16:34 +02001474 if (!(memory_region_is_ram(section->mr)
1475 || (section->mr->rom_device && section->mr->readable))) {
Avi Kivity06ef3522012-02-13 16:11:22 +02001476 return;
1477 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001478 ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001479 + memory_region_section_addr(section, addr);
pbrook706cd4b2006-04-08 17:36:21 +00001480 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001481}
Max Filippov1e7855a2012-04-10 02:48:17 +04001482
1483static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
1484{
Max Filippov9d70c4b2012-05-27 20:21:08 +04001485 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
1486 (pc & ~TARGET_PAGE_MASK));
Max Filippov1e7855a2012-04-10 02:48:17 +04001487}
bellardc27004e2005-01-03 23:35:10 +00001488#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001489#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001490
Paul Brookc527ee82010-03-01 03:31:14 +00001491#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001492void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
Paul Brookc527ee82010-03-01 03:31:14 +00001493{
1495}
1496
Andreas Färber9349b4f2012-03-14 01:38:32 +01001497int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
Paul Brookc527ee82010-03-01 03:31:14 +00001498 int flags, CPUWatchpoint **watchpoint)
1499{
1500 return -ENOSYS;
1501}
1502#else
pbrook6658ffb2007-03-16 23:58:11 +00001503/* Add a watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001504int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001505 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001506{
aliguorib4051332008-11-18 20:14:20 +00001507 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001508 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001509
aliguorib4051332008-11-18 20:14:20 +00001510 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
Max Filippov0dc23822012-01-29 03:15:23 +04001511 if ((len & (len - 1)) || (addr & ~len_mask) ||
1512 len == 0 || len > TARGET_PAGE_SIZE) {
aliguorib4051332008-11-18 20:14:20 +00001513 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1514 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1515 return -EINVAL;
1516 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001517 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001518
aliguoria1d1bb32008-11-18 20:07:32 +00001519 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001520 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001521 wp->flags = flags;
1522
aliguori2dc9f412008-11-18 20:56:59 +00001523 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001524 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001525 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001526 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001527 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001528
pbrook6658ffb2007-03-16 23:58:11 +00001529 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001530
1531 if (watchpoint)
1532 *watchpoint = wp;
1533 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001534}
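
/* Illustrative usage sketch (hypothetical values, not from the original
   file): installing a 4-byte GDB-style write watchpoint.  Per the sanity
   check above, len must be a power of two and addr aligned to it. */
#if 0
static void example_watch_write(CPUArchState *env, target_ulong addr)
{
    CPUWatchpoint *wp;
    if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp) < 0) {
        /* bad length/alignment */
    }
}
#endif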
1535
aliguoria1d1bb32008-11-18 20:07:32 +00001536/* Remove a specific watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001537int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001538 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001539{
aliguorib4051332008-11-18 20:14:20 +00001540 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001541 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001542
Blue Swirl72cf2d42009-09-12 07:36:22 +00001543 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001544 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001545 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001546 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001547 return 0;
1548 }
1549 }
aliguoria1d1bb32008-11-18 20:07:32 +00001550 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001551}
1552
aliguoria1d1bb32008-11-18 20:07:32 +00001553/* Remove a specific watchpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001554void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
aliguoria1d1bb32008-11-18 20:07:32 +00001555{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001556 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001557
aliguoria1d1bb32008-11-18 20:07:32 +00001558 tlb_flush_page(env, watchpoint->vaddr);
1559
Anthony Liguori7267c092011-08-20 22:09:37 -05001560 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001561}
1562
aliguoria1d1bb32008-11-18 20:07:32 +00001563/* Remove all matching watchpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001564void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001565{
aliguoric0ce9982008-11-25 22:13:57 +00001566 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001567
Blue Swirl72cf2d42009-09-12 07:36:22 +00001568 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001569 if (wp->flags & mask)
1570 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001571 }
aliguoria1d1bb32008-11-18 20:07:32 +00001572}
Paul Brookc527ee82010-03-01 03:31:14 +00001573#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001574
1575/* Add a breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001576int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +00001577 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001578{
bellard1fddef42005-04-17 19:16:13 +00001579#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001580 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001581
Anthony Liguori7267c092011-08-20 22:09:37 -05001582 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001583
1584 bp->pc = pc;
1585 bp->flags = flags;
1586
aliguori2dc9f412008-11-18 20:56:59 +00001587 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001588 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001589 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001590 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001591 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001592
1593 breakpoint_invalidate(env, pc);
1594
1595 if (breakpoint)
1596 *breakpoint = bp;
1597 return 0;
1598#else
1599 return -ENOSYS;
1600#endif
1601}
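
/* Illustrative usage sketch (hypothetical caller, not from the original
   file): a gdbstub-style insert/remove pair using the reference API. */
#if 0
static void example_break_once(CPUArchState *env, target_ulong pc)
{
    CPUBreakpoint *bp;
    if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
        /* ... resume the guest, wait for EXCP_DEBUG ... */
        cpu_breakpoint_remove_by_ref(env, bp);
    }
}
#endif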
1602
1603/* Remove a specific breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001604int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +00001605{
1606#if defined(TARGET_HAS_ICE)
1607 CPUBreakpoint *bp;
1608
Blue Swirl72cf2d42009-09-12 07:36:22 +00001609 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001610 if (bp->pc == pc && bp->flags == flags) {
1611 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001612 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001613 }
bellard4c3a88a2003-07-26 12:06:08 +00001614 }
aliguoria1d1bb32008-11-18 20:07:32 +00001615 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001616#else
aliguoria1d1bb32008-11-18 20:07:32 +00001617 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001618#endif
1619}
1620
aliguoria1d1bb32008-11-18 20:07:32 +00001621/* Remove a specific breakpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001622void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001623{
bellard1fddef42005-04-17 19:16:13 +00001624#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001625 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001626
aliguoria1d1bb32008-11-18 20:07:32 +00001627 breakpoint_invalidate(env, breakpoint->pc);
1628
Anthony Liguori7267c092011-08-20 22:09:37 -05001629 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001630#endif
1631}
1632
1633/* Remove all matching breakpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001634void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001635{
1636#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001637 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001638
Blue Swirl72cf2d42009-09-12 07:36:22 +00001639 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001640 if (bp->flags & mask)
1641 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001642 }
bellard4c3a88a2003-07-26 12:06:08 +00001643#endif
1644}
1645
bellardc33a3462003-07-29 20:50:33 +00001646/* enable or disable single step mode. EXCP_DEBUG is returned by the
1647 CPU loop after each instruction */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001648void cpu_single_step(CPUArchState *env, int enabled)
bellardc33a3462003-07-29 20:50:33 +00001649{
bellard1fddef42005-04-17 19:16:13 +00001650#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001651 if (env->singlestep_enabled != enabled) {
1652 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001653 if (kvm_enabled())
1654 kvm_update_guest_debug(env, 0);
1655 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001656 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001657 /* XXX: only flush what is necessary */
1658 tb_flush(env);
1659 }
bellardc33a3462003-07-29 20:50:33 +00001660 }
1661#endif
1662}
1663
Andreas Färber9349b4f2012-03-14 01:38:32 +01001664static void cpu_unlink_tb(CPUArchState *env)
bellardea041c02003-06-25 16:16:50 +00001665{
pbrookd5975362008-06-07 20:50:51 +00001666 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1667 problem and hope the cpu will stop of its own accord. For userspace
1668 emulation this often isn't actually as bad as it sounds. Often
1669 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001670 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001671 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001672
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001673 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001674 tb = env->current_tb;
1675 /* if the cpu is currently executing code, we must unlink it and
1676 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001677 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001678 env->current_tb = NULL;
1679 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001680 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001681 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001682}
1683
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001684#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001685/* mask must never be zero, except for A20 change call */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001686static void tcg_handle_interrupt(CPUArchState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001687{
1688 int old_mask;
1689
1690 old_mask = env->interrupt_request;
1691 env->interrupt_request |= mask;
1692
aliguori8edac962009-04-24 18:03:45 +00001693 /*
1694 * If called from iothread context, wake the target cpu in
1695     * case it's halted.
1696 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001697 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001698 qemu_cpu_kick(env);
1699 return;
1700 }
aliguori8edac962009-04-24 18:03:45 +00001701
pbrook2e70f6e2008-06-29 01:03:05 +00001702 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001703 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001704 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001705 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001706 cpu_abort(env, "Raised interrupt while not in I/O function");
1707 }
pbrook2e70f6e2008-06-29 01:03:05 +00001708 } else {
aurel323098dba2009-03-07 21:28:24 +00001709 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001710 }
1711}
1712
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001713CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1714
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001715#else /* CONFIG_USER_ONLY */
1716
Andreas Färber9349b4f2012-03-14 01:38:32 +01001717void cpu_interrupt(CPUArchState *env, int mask)
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001718{
1719 env->interrupt_request |= mask;
1720 cpu_unlink_tb(env);
1721}
1722#endif /* CONFIG_USER_ONLY */
1723
Andreas Färber9349b4f2012-03-14 01:38:32 +01001724void cpu_reset_interrupt(CPUArchState *env, int mask)
bellardb54ad042004-05-20 13:42:52 +00001725{
1726 env->interrupt_request &= ~mask;
1727}
1728
Andreas Färber9349b4f2012-03-14 01:38:32 +01001729void cpu_exit(CPUArchState *env)
aurel323098dba2009-03-07 21:28:24 +00001730{
1731 env->exit_request = 1;
1732 cpu_unlink_tb(env);
1733}
1734
Andreas Färber9349b4f2012-03-14 01:38:32 +01001735void cpu_abort(CPUArchState *env, const char *fmt, ...)
bellard75012672003-06-21 13:11:07 +00001736{
1737 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001738 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001739
1740 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001741 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001742 fprintf(stderr, "qemu: fatal: ");
1743 vfprintf(stderr, fmt, ap);
1744 fprintf(stderr, "\n");
Peter Maydell6fd2a022012-10-05 15:04:43 +01001745 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori93fcfe32009-01-15 22:34:14 +00001746 if (qemu_log_enabled()) {
1747 qemu_log("qemu: fatal: ");
1748 qemu_log_vprintf(fmt, ap2);
1749 qemu_log("\n");
Peter Maydell6fd2a022012-10-05 15:04:43 +01001750 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori31b1a7b2009-01-15 22:35:09 +00001751 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001752 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001753 }
pbrook493ae1f2007-11-23 16:53:59 +00001754 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001755 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001756#if defined(CONFIG_USER_ONLY)
1757 {
1758 struct sigaction act;
1759 sigfillset(&act.sa_mask);
1760 act.sa_handler = SIG_DFL;
1761 sigaction(SIGABRT, &act, NULL);
1762 }
1763#endif
bellard75012672003-06-21 13:11:07 +00001764 abort();
1765}
1766
Andreas Färber9349b4f2012-03-14 01:38:32 +01001767CPUArchState *cpu_copy(CPUArchState *env)
thsc5be9f02007-02-28 20:20:53 +00001768{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001769 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1770 CPUArchState *next_cpu = new_env->next_cpu;
thsc5be9f02007-02-28 20:20:53 +00001771 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001772#if defined(TARGET_HAS_ICE)
1773 CPUBreakpoint *bp;
1774 CPUWatchpoint *wp;
1775#endif
1776
Andreas Färber9349b4f2012-03-14 01:38:32 +01001777 memcpy(new_env, env, sizeof(CPUArchState));
aliguori5a38f082009-01-15 20:16:51 +00001778
1779 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001780 new_env->next_cpu = next_cpu;
1781 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001782
1783 /* Clone all break/watchpoints.
1784 Note: Once we support ptrace with hw-debug register access, make sure
1785 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001786    QTAILQ_INIT(&new_env->breakpoints);
1787    QTAILQ_INIT(&new_env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001788#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001789 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001790 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1791 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001792 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001793 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1794 wp->flags, NULL);
1795 }
1796#endif
1797
thsc5be9f02007-02-28 20:20:53 +00001798 return new_env;
1799}
1800
bellard01243112004-01-04 15:48:17 +00001801#if !defined(CONFIG_USER_ONLY)
Blue Swirl0cac1b62012-04-09 16:50:52 +00001802void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
edgar_igl5c751e92008-05-06 08:44:21 +00001803{
1804 unsigned int i;
1805
1806 /* Discard jump cache entries for any tb which might potentially
1807 overlap the flushed page. */
1808 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1809    memset(&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001810 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001811
1812 i = tb_jmp_cache_hash_page(addr);
1813    memset(&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001814 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001815}
1816
Juan Quintelad24981d2012-05-22 00:42:40 +02001817static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
1818 uintptr_t length)
bellard1ccde1c2004-02-06 19:46:14 +00001819{
Juan Quintelad24981d2012-05-22 00:42:40 +02001820 uintptr_t start1;
bellardf23db162005-08-21 19:12:28 +00001821
bellard1ccde1c2004-02-06 19:46:14 +00001822 /* we modify the TLB cache so that the dirty bit will be set again
1823 when accessing the range */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001824 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02001825    /* Check that we don't span multiple blocks - spanning would break the
pbrook5579c7f2009-04-11 14:47:08 +00001826 address comparisons below. */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001827 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00001828 != (end - 1) - start) {
1829 abort();
1830 }
Blue Swirle5548612012-04-21 13:08:33 +00001831 cpu_tlb_reset_dirty_all(start1, length);
Juan Quintelad24981d2012-05-22 00:42:40 +02001832}
1834
1835/* Note: start and end must be within the same ram block. */
1836void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1837 int dirty_flags)
1838{
1839 uintptr_t length;
1840
1841 start &= TARGET_PAGE_MASK;
1842 end = TARGET_PAGE_ALIGN(end);
1843
1844 length = end - start;
1845 if (length == 0)
1846 return;
1847 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1848
1849 if (tcg_enabled()) {
1850 tlb_reset_dirty_range_all(start, end, length);
1851 }
bellard1ccde1c2004-02-06 19:46:14 +00001852}
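
/* Illustrative usage sketch (not from the original file): clearing dirty
   state for one RAM range after copying it, as a migration-style caller
   might; assumes the MIGRATION_DIRTY_FLAG bit and that start/end stay
   within a single ram block, as required above. */
#if 0
static void example_clear_migration_dirty(ram_addr_t start, ram_addr_t len)
{
    cpu_physical_memory_reset_dirty(start, start + len,
                                    MIGRATION_DIRTY_FLAG);
}
#endif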
1853
aliguori74576192008-10-06 14:02:03 +00001854int cpu_physical_memory_set_dirty_tracking(int enable)
1855{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001856 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00001857 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001858 return ret;
aliguori74576192008-10-06 14:02:03 +00001859}
1860
Blue Swirle5548612012-04-21 13:08:33 +00001861target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
1862 MemoryRegionSection *section,
1863 target_ulong vaddr,
1864 target_phys_addr_t paddr,
1865 int prot,
1866 target_ulong *address)
1867{
1868 target_phys_addr_t iotlb;
1869 CPUWatchpoint *wp;
1870
Blue Swirlcc5bea62012-04-14 14:56:48 +00001871 if (memory_region_is_ram(section->mr)) {
Blue Swirle5548612012-04-21 13:08:33 +00001872 /* Normal RAM. */
1873 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001874 + memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001875 if (!section->readonly) {
1876 iotlb |= phys_section_notdirty;
1877 } else {
1878 iotlb |= phys_section_rom;
1879 }
1880 } else {
1881 /* IO handlers are currently passed a physical address.
1882 It would be nice to pass an offset from the base address
1883 of that region. This would avoid having to special case RAM,
1884 and avoid full address decoding in every device.
1885       Here the iotlb value instead encodes the section index
1886       plus the offset within the section. */
1887 iotlb = section - phys_sections;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001888 iotlb += memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001889 }
1890
1891 /* Make accesses to pages with watchpoints go via the
1892 watchpoint trap routines. */
1893 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1894 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1895 /* Avoid trapping reads of pages with a write breakpoint. */
1896 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1897 iotlb = phys_section_watch + paddr;
1898 *address |= TLB_MMIO;
1899 break;
1900 }
1901 }
1902 }
1903
1904 return iotlb;
1905}
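
/* Illustrative sketch (hypothetical helper, not from the original file):
   the page-granular test the watchpoint loop above performs before
   routing a mapping through phys_section_watch with TLB_MMIO set. */
#if 0
static bool example_page_is_watched(CPUArchState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr & TARGET_PAGE_MASK) == (wp->vaddr & TARGET_PAGE_MASK)) {
            return true;
        }
    }
    return false;
}
#endif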
1906
bellard01243112004-01-04 15:48:17 +00001907#else
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03001908/*
1909 * Walks guest process memory "regions" one by one
1910 * and calls callback function 'fn' for each region.
1911 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001912
1913struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00001914{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001915 walk_memory_regions_fn fn;
1916 void *priv;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001917 uintptr_t start;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001918 int prot;
1919};
bellard9fa3e852004-01-04 18:06:42 +00001920
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001921static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00001922 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001923{
1924 if (data->start != -1ul) {
1925 int rc = data->fn(data->priv, data->start, end, data->prot);
1926 if (rc != 0) {
1927 return rc;
bellard9fa3e852004-01-04 18:06:42 +00001928 }
bellard33417e72003-08-10 21:47:01 +00001929 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001930
1931 data->start = (new_prot ? end : -1ul);
1932 data->prot = new_prot;
1933
1934 return 0;
1935}
1936
1937static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00001938 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001939{
Paul Brookb480d9b2010-03-12 23:23:29 +00001940 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001941 int i, rc;
1942
1943 if (*lp == NULL) {
1944 return walk_memory_regions_end(data, base, 0);
1945 }
1946
1947 if (level == 0) {
1948 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001949 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001950 int prot = pd[i].flags;
1951
1952 pa = base | (i << TARGET_PAGE_BITS);
1953 if (prot != data->prot) {
1954 rc = walk_memory_regions_end(data, pa, prot);
1955 if (rc != 0) {
1956 return rc;
1957 }
1958 }
1959 }
1960 } else {
1961 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001962 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00001963 pa = base | ((abi_ulong)i <<
1964 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001965 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1966 if (rc != 0) {
1967 return rc;
1968 }
1969 }
1970 }
1971
1972 return 0;
1973}
1974
1975int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1976{
1977 struct walk_memory_regions_data data;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001978 uintptr_t i;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001979
1980 data.fn = fn;
1981 data.priv = priv;
1982 data.start = -1ul;
1983 data.prot = 0;
1984
1985 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00001986 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001987 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
1988 if (rc != 0) {
1989 return rc;
1990 }
1991 }
1992
1993 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03001994}
1995
Paul Brookb480d9b2010-03-12 23:23:29 +00001996static int dump_region(void *priv, abi_ulong start,
1997 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03001998{
1999 FILE *f = (FILE *)priv;
2000
Paul Brookb480d9b2010-03-12 23:23:29 +00002001 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2002 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002003 start, end, end - start,
2004 ((prot & PAGE_READ) ? 'r' : '-'),
2005 ((prot & PAGE_WRITE) ? 'w' : '-'),
2006 ((prot & PAGE_EXEC) ? 'x' : '-'));
2007
2008 return (0);
2009}
2010
2011/* dump memory mappings */
2012void page_dump(FILE *f)
2013{
2014 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2015 "start", "end", "size", "prot");
2016 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002017}
2018
pbrook53a59602006-03-25 19:31:22 +00002019int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002020{
bellard9fa3e852004-01-04 18:06:42 +00002021 PageDesc *p;
2022
2023 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002024 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002025 return 0;
2026 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002027}
2028
Richard Henderson376a7902010-03-10 15:57:04 -08002029/* Modify the flags of a page and invalidate the code if necessary.
2030   The flag PAGE_WRITE_ORG is set automatically depending
2031 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002032void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002033{
Richard Henderson376a7902010-03-10 15:57:04 -08002034 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002035
Richard Henderson376a7902010-03-10 15:57:04 -08002036 /* This function should never be called with addresses outside the
2037 guest address space. If this assert fires, it probably indicates
2038 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002039#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2040 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002041#endif
2042 assert(start < end);
2043
bellard9fa3e852004-01-04 18:06:42 +00002044 start = start & TARGET_PAGE_MASK;
2045 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002046
2047 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002048 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002049 }
2050
2051 for (addr = start, len = end - start;
2052 len != 0;
2053 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2054 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2055
2056 /* If the write protection bit is set, then we invalidate
2057 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002058 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002059 (flags & PAGE_WRITE) &&
2060 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002061 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002062 }
2063 p->flags = flags;
2064 }
bellard9fa3e852004-01-04 18:06:42 +00002065}
2066
ths3d97b402007-11-02 19:02:07 +00002067int page_check_range(target_ulong start, target_ulong len, int flags)
2068{
2069 PageDesc *p;
2070 target_ulong end;
2071 target_ulong addr;
2072
Richard Henderson376a7902010-03-10 15:57:04 -08002073 /* This function should never be called with addresses outside the
2074 guest address space. If this assert fires, it probably indicates
2075 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002076#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2077 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002078#endif
2079
Richard Henderson3e0650a2010-03-29 10:54:42 -07002080 if (len == 0) {
2081 return 0;
2082 }
Richard Henderson376a7902010-03-10 15:57:04 -08002083 if (start + len - 1 < start) {
2084 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002085 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002086 }
balrog55f280c2008-10-28 10:24:11 +00002087
ths3d97b402007-11-02 19:02:07 +00002088    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
2089 start = start & TARGET_PAGE_MASK;
2090
Richard Henderson376a7902010-03-10 15:57:04 -08002091 for (addr = start, len = end - start;
2092 len != 0;
2093 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002094 p = page_find(addr >> TARGET_PAGE_BITS);
2095        if (!p)
2096            return -1;
2097        if (!(p->flags & PAGE_VALID))
2098            return -1;
2099
bellarddae32702007-11-14 10:51:00 +00002100 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002101 return -1;
bellarddae32702007-11-14 10:51:00 +00002102 if (flags & PAGE_WRITE) {
2103 if (!(p->flags & PAGE_WRITE_ORG))
2104 return -1;
2105 /* unprotect the page if it was put read-only because it
2106 contains translated code */
2107 if (!(p->flags & PAGE_WRITE)) {
2108 if (!page_unprotect(addr, 0, NULL))
2109 return -1;
2110 }
2111 return 0;
2112 }
ths3d97b402007-11-02 19:02:07 +00002113 }
2114 return 0;
2115}
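
/* Illustrative usage sketch (not from the original file): validating a
   guest buffer before touching it directly, as user-mode syscall
   emulation does; 0 means every page is mapped with the needed rights. */
#if 0
static int example_buffer_ok(target_ulong guest_addr, target_ulong size)
{
    return page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) == 0;
}
#endif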
2116
bellard9fa3e852004-01-04 18:06:42 +00002117/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002118 page. Return TRUE if the fault was successfully handled. */
Stefan Weil6375e092012-04-06 22:26:15 +02002119int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002120{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002121 unsigned int prot;
2122 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002123 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002124
pbrookc8a706f2008-06-02 16:16:42 +00002125 /* Technically this isn't safe inside a signal handler. However we
2126 know this only ever happens in a synchronous SEGV handler, so in
2127 practice it seems to be ok. */
2128 mmap_lock();
2129
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002130 p = page_find(address >> TARGET_PAGE_BITS);
2131 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002132 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002133 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002134 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002135
bellard9fa3e852004-01-04 18:06:42 +00002136 /* if the page was really writable, then we change its
2137 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002138 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2139 host_start = address & qemu_host_page_mask;
2140 host_end = host_start + qemu_host_page_size;
2141
2142 prot = 0;
2143 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2144 p = page_find(addr >> TARGET_PAGE_BITS);
2145 p->flags |= PAGE_WRITE;
2146 prot |= p->flags;
2147
bellard9fa3e852004-01-04 18:06:42 +00002148 /* and since the content will be modified, we must invalidate
2149 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002150 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002151#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002152 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002153#endif
bellard9fa3e852004-01-04 18:06:42 +00002154 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002155 mprotect((void *)g2h(host_start), qemu_host_page_size,
2156 prot & PAGE_BITS);
2157
2158 mmap_unlock();
2159 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002160 }
pbrookc8a706f2008-06-02 16:16:42 +00002161 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002162 return 0;
2163}
bellard9fa3e852004-01-04 18:06:42 +00002164#endif /* defined(CONFIG_USER_ONLY) */
2165
pbrooke2eef172008-06-08 01:09:01 +00002166#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002167
Paul Brookc04b2b72010-03-01 03:31:14 +00002168#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2169typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002170 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002171 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002172 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002173} subpage_t;
2174
Anthony Liguoric227f092009-10-01 16:12:16 -05002175static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002176 uint16_t section);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002177static subpage_t *subpage_init(target_phys_addr_t base);
Avi Kivity5312bd82012-02-12 18:32:55 +02002178static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002179{
Avi Kivity5312bd82012-02-12 18:32:55 +02002180 MemoryRegionSection *section = &phys_sections[section_index];
2181 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002182
2183 if (mr->subpage) {
2184 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2185 memory_region_destroy(&subpage->iomem);
2186 g_free(subpage);
2187 }
2188}
2189
Avi Kivity4346ae32012-02-10 17:00:01 +02002190static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002191{
2192 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002193 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002194
Avi Kivityc19e8802012-02-13 20:25:31 +02002195 if (lp->ptr == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002196 return;
2197 }
2198
Avi Kivityc19e8802012-02-13 20:25:31 +02002199 p = phys_map_nodes[lp->ptr];
Avi Kivity4346ae32012-02-10 17:00:01 +02002200 for (i = 0; i < L2_SIZE; ++i) {
Avi Kivity07f07b32012-02-13 20:45:32 +02002201 if (!p[i].is_leaf) {
Avi Kivity54688b12012-02-09 17:34:32 +02002202 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002203 } else {
Avi Kivityc19e8802012-02-13 20:25:31 +02002204 destroy_page_desc(p[i].ptr);
Avi Kivity54688b12012-02-09 17:34:32 +02002205 }
Avi Kivity54688b12012-02-09 17:34:32 +02002206 }
Avi Kivity07f07b32012-02-13 20:45:32 +02002207 lp->is_leaf = 0;
Avi Kivityc19e8802012-02-13 20:25:31 +02002208 lp->ptr = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002209}
2210
2211static void destroy_all_mappings(void)
2212{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002213 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002214 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002215}
2216
Avi Kivity5312bd82012-02-12 18:32:55 +02002217static uint16_t phys_section_add(MemoryRegionSection *section)
2218{
2219 if (phys_sections_nb == phys_sections_nb_alloc) {
2220 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2221 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2222 phys_sections_nb_alloc);
2223 }
2224 phys_sections[phys_sections_nb] = *section;
2225 return phys_sections_nb++;
2226}
2227
2228static void phys_sections_clear(void)
2229{
2230 phys_sections_nb = 0;
2231}
2232
Avi Kivity0f0cb162012-02-13 17:14:32 +02002233static void register_subpage(MemoryRegionSection *section)
2234{
2235 subpage_t *subpage;
2236 target_phys_addr_t base = section->offset_within_address_space
2237 & TARGET_PAGE_MASK;
Avi Kivityf3705d52012-03-08 16:16:34 +02002238 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002239 MemoryRegionSection subsection = {
2240 .offset_within_address_space = base,
2241 .size = TARGET_PAGE_SIZE,
2242 };
Avi Kivity0f0cb162012-02-13 17:14:32 +02002243 target_phys_addr_t start, end;
2244
Avi Kivityf3705d52012-03-08 16:16:34 +02002245 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002246
Avi Kivityf3705d52012-03-08 16:16:34 +02002247 if (!(existing->mr->subpage)) {
Avi Kivity0f0cb162012-02-13 17:14:32 +02002248 subpage = subpage_init(base);
2249 subsection.mr = &subpage->iomem;
Avi Kivity29990972012-02-13 20:21:20 +02002250 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2251 phys_section_add(&subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02002252 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002253 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002254 }
2255 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Tyler Halladb2a9b2012-07-25 18:45:03 -04002256 end = start + section->size - 1;
Avi Kivity0f0cb162012-02-13 17:14:32 +02002257 subpage_register(subpage, start, end, phys_section_add(section));
2258}
2259
2260
2261static void register_multipage(MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00002262{
Avi Kivitydd811242012-01-02 12:17:03 +02002263 target_phys_addr_t start_addr = section->offset_within_address_space;
2264 ram_addr_t size = section->size;
Avi Kivity29990972012-02-13 20:21:20 +02002265 target_phys_addr_t addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002266 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002267
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002268 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002269
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002270 addr = start_addr;
Avi Kivity29990972012-02-13 20:21:20 +02002271 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2272 section_index);
bellard33417e72003-08-10 21:47:01 +00002273}
2274
Avi Kivity0f0cb162012-02-13 17:14:32 +02002275void cpu_register_physical_memory_log(MemoryRegionSection *section,
2276 bool readonly)
2277{
2278 MemoryRegionSection now = *section, remain = *section;
2279
2280 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2281 || (now.size < TARGET_PAGE_SIZE)) {
2282 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2283 - now.offset_within_address_space,
2284 now.size);
2285 register_subpage(&now);
2286 remain.size -= now.size;
2287 remain.offset_within_address_space += now.size;
2288 remain.offset_within_region += now.size;
2289 }
Tyler Hall69b67642012-07-25 18:45:04 -04002290 while (remain.size >= TARGET_PAGE_SIZE) {
2291 now = remain;
2292 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
2293 now.size = TARGET_PAGE_SIZE;
2294 register_subpage(&now);
2295 } else {
2296 now.size &= TARGET_PAGE_MASK;
2297 register_multipage(&now);
2298 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02002299 remain.size -= now.size;
2300 remain.offset_within_address_space += now.size;
2301 remain.offset_within_region += now.size;
2302 }
2303 now = remain;
2304 if (now.size) {
2305 register_subpage(&now);
2306 }
2307}
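
/* Worked example (illustrative, not from the original file): with 4K
   target pages, a section starting at page-aligned address 0x2000 with
   size 0x2800 is split by the code above into one register_multipage()
   call covering [0x2000, 0x4000) and a tail register_subpage() call
   covering [0x4000, 0x4800).  An unaligned head would first be peeled
   off as its own subpage. */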
2308
2309
Anthony Liguoric227f092009-10-01 16:12:16 -05002310void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002311{
2312 if (kvm_enabled())
2313 kvm_coalesce_mmio_region(addr, size);
2314}
2315
Anthony Liguoric227f092009-10-01 16:12:16 -05002316void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002317{
2318 if (kvm_enabled())
2319 kvm_uncoalesce_mmio_region(addr, size);
2320}
2321
Sheng Yang62a27442010-01-26 19:21:16 +08002322void qemu_flush_coalesced_mmio_buffer(void)
2323{
2324 if (kvm_enabled())
2325 kvm_flush_coalesced_mmio_buffer();
2326}
2327
Marcelo Tosattic9027602010-03-01 20:25:08 -03002328#if defined(__linux__) && !defined(TARGET_S390X)
2329
2330#include <sys/vfs.h>
2331
2332#define HUGETLBFS_MAGIC 0x958458f6
2333
2334static long gethugepagesize(const char *path)
2335{
2336 struct statfs fs;
2337 int ret;
2338
2339 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002340 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002341 } while (ret != 0 && errno == EINTR);
2342
2343 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002344 perror(path);
2345 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002346 }
2347
2348 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002349 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002350
2351 return fs.f_bsize;
2352}
2353
Alex Williamson04b16652010-07-02 11:13:17 -06002354static void *file_ram_alloc(RAMBlock *block,
2355 ram_addr_t memory,
2356 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002357{
2358 char *filename;
2359 void *area;
2360 int fd;
2361#ifdef MAP_POPULATE
2362 int flags;
2363#endif
2364 unsigned long hpagesize;
2365
2366 hpagesize = gethugepagesize(path);
2367 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002368 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002369 }
2370
2371 if (memory < hpagesize) {
2372 return NULL;
2373 }
2374
2375 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2376 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2377 return NULL;
2378 }
2379
2380 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002381 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002382 }
2383
2384 fd = mkstemp(filename);
2385 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002386 perror("unable to create backing store for hugepages");
2387 free(filename);
2388 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002389 }
2390 unlink(filename);
2391 free(filename);
2392
2393 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2394
2395 /*
2396 * ftruncate is not supported by hugetlbfs in older
2397 * hosts, so don't bother bailing out on errors.
2398 * If anything goes wrong with it under other filesystems,
2399 * mmap will fail.
2400 */
2401 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002402 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002403
2404#ifdef MAP_POPULATE
2405    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages when
2406 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2407 * to sidestep this quirk.
2408 */
2409 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2410 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2411#else
2412 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2413#endif
2414 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002415 perror("file_ram_alloc: can't mmap RAM pages");
2416 close(fd);
2417 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002418 }
Alex Williamson04b16652010-07-02 11:13:17 -06002419 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002420 return area;
2421}
2422#endif
2423
Alex Williamsond17b5282010-06-25 11:08:38 -06002424static ram_addr_t find_ram_offset(ram_addr_t size)
2425{
Alex Williamson04b16652010-07-02 11:13:17 -06002426 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002427 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002428
2429 if (QLIST_EMPTY(&ram_list.blocks))
2430 return 0;
2431
2432 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002433 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002434
2435 end = block->offset + block->length;
2436
2437 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2438 if (next_block->offset >= end) {
2439 next = MIN(next, next_block->offset);
2440 }
2441 }
2442 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002443 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002444 mingap = next - end;
2445 }
2446 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002447
2448 if (offset == RAM_ADDR_MAX) {
2449 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2450 (uint64_t)size);
2451 abort();
2452 }
2453
Alex Williamson04b16652010-07-02 11:13:17 -06002454 return offset;
2455}
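
/* Worked example (illustrative, not from the original file): with blocks
   at [0, 0x1000) and [0x3000, 0x4000), find_ram_offset(0x1000) sees a
   0x2000-byte gap after the first block and an unbounded gap after the
   second; the smallest gap that still fits wins, so 0x1000 is returned. */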
2456
2457static ram_addr_t last_ram_offset(void)
2458{
Alex Williamsond17b5282010-06-25 11:08:38 -06002459 RAMBlock *block;
2460 ram_addr_t last = 0;
2461
2462 QLIST_FOREACH(block, &ram_list.blocks, next)
2463 last = MAX(last, block->offset + block->length);
2464
2465 return last;
2466}
2467
Jason Baronddb97f12012-08-02 15:44:16 -04002468static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
2469{
2470 int ret;
2471 QemuOpts *machine_opts;
2472
2473    /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
2474 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2475 if (machine_opts &&
2476 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
2477 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
2478 if (ret) {
2479 perror("qemu_madvise");
2480 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
2481 "but dump_guest_core=off specified\n");
2482 }
2483 }
2484}
2485
Avi Kivityc5705a72011-12-20 15:59:12 +02002486void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002487{
2488 RAMBlock *new_block, *block;
2489
Avi Kivityc5705a72011-12-20 15:59:12 +02002490 new_block = NULL;
2491 QLIST_FOREACH(block, &ram_list.blocks, next) {
2492 if (block->offset == addr) {
2493 new_block = block;
2494 break;
2495 }
2496 }
2497 assert(new_block);
2498 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002499
Anthony Liguori09e5ab62012-02-03 12:28:43 -06002500 if (dev) {
2501 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002502 if (id) {
2503 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002504 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002505 }
2506 }
2507 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2508
2509 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002510 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002511 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2512 new_block->idstr);
2513 abort();
2514 }
2515 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002516}
2517
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002518static int memory_try_enable_merging(void *addr, size_t len)
2519{
2520 QemuOpts *opts;
2521
2522 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2523 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
2524 /* disabled by the user */
2525 return 0;
2526 }
2527
2528 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
2529}
2530
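/* Allocate a new RAM block of @size bytes for @mr, or adopt the caller's
   buffer when @host is non-NULL.  The block is placed via find_ram_offset(),
   its pages start out all-dirty, and the returned value is the block's
   offset in ram_addr_t space. */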
Avi Kivityc5705a72011-12-20 15:59:12 +02002531ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2532 MemoryRegion *mr)
2533{
2534 RAMBlock *new_block;
2535
2536 size = TARGET_PAGE_ALIGN(size);
2537 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002538
Avi Kivity7c637362011-12-21 13:09:49 +02002539 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002540 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002541 if (host) {
2542 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002543 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002544 } else {
2545 if (mem_path) {
2546#if defined (__linux__) && !defined(TARGET_S390X)
2547 new_block->host = file_ram_alloc(new_block, size, mem_path);
2548 if (!new_block->host) {
2549 new_block->host = qemu_vmalloc(size);
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002550 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002551 }
2552#else
2553 fprintf(stderr, "-mem-path option unsupported\n");
2554 exit(1);
2555#endif
2556 } else {
Jan Kiszka868bb332011-06-21 22:59:09 +02002557 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002558 xen_ram_alloc(new_block->offset, size, mr);
Christian Borntraegerfdec9912012-06-15 05:10:30 +00002559 } else if (kvm_enabled()) {
2560 /* some s390/kvm configurations have special constraints */
2561 new_block->host = kvm_vmalloc(size);
Jun Nakajima432d2682010-08-31 16:41:25 +01002562 } else {
2563 new_block->host = qemu_vmalloc(size);
2564 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002565 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002566 }
2567 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002568 new_block->length = size;
2569
2570 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2571
Anthony Liguori7267c092011-08-20 22:09:37 -05002572 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002573 last_ram_offset() >> TARGET_PAGE_BITS);
Igor Mitsyanko5fda0432012-08-10 18:45:11 +04002574 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2575 0, size >> TARGET_PAGE_BITS);
Juan Quintela1720aee2012-06-22 13:14:17 +02002576 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002577
Jason Baronddb97f12012-08-02 15:44:16 -04002578 qemu_ram_setup_dump(new_block->host, size);
2579
Cam Macdonell84b89d72010-07-26 18:10:57 -06002580 if (kvm_enabled())
2581 kvm_setup_guest_memory(new_block->host, size);
2582
2583 return new_block->offset;
2584}
2585
Avi Kivityc5705a72011-12-20 15:59:12 +02002586ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002587{
Avi Kivityc5705a72011-12-20 15:59:12 +02002588 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002589}
bellarde9a1ab12007-02-08 23:08:38 +00002590
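/* Illustrative sketch, not part of the build: boards normally reach this
 * through the MemoryRegion API, but the underlying sequence is just an
 * allocation plus an id string for migration matching.  'mr' and the
 * "example.ram" name are placeholders.  */
static ram_addr_t example_alloc_ram(MemoryRegion *mr, ram_addr_t size)
{
    ram_addr_t offset = qemu_ram_alloc(size, mr);

    qemu_ram_set_idstr(offset, "example.ram", NULL);
    return offset;
}
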
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002591void qemu_ram_free_from_ptr(ram_addr_t addr)
2592{
2593 RAMBlock *block;
2594
2595 QLIST_FOREACH(block, &ram_list.blocks, next) {
2596 if (addr == block->offset) {
2597 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002598 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002599 return;
2600 }
2601 }
2602}
2603
Anthony Liguoric227f092009-10-01 16:12:16 -05002604void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002605{
Alex Williamson04b16652010-07-02 11:13:17 -06002606 RAMBlock *block;
2607
2608 QLIST_FOREACH(block, &ram_list.blocks, next) {
2609 if (addr == block->offset) {
2610 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002611 if (block->flags & RAM_PREALLOC_MASK) {
2612 ;
2613 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002614#if defined (__linux__) && !defined(TARGET_S390X)
2615 if (block->fd) {
2616 munmap(block->host, block->length);
2617 close(block->fd);
2618 } else {
2619 qemu_vfree(block->host);
2620 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002621#else
2622 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002623#endif
2624 } else {
2625#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2626 munmap(block->host, block->length);
2627#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002628 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002629 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002630 } else {
2631 qemu_vfree(block->host);
2632 }
Alex Williamson04b16652010-07-02 11:13:17 -06002633#endif
2634 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002635 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002636 return;
2637 }
2638 }
2639
bellarde9a1ab12007-02-08 23:08:38 +00002640}
2641
Huang Yingcd19cfa2011-03-02 08:56:19 +01002642#ifndef _WIN32
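/* Drop the current mapping of [addr, addr + length) and rebuild it with
   the same kind of backing (file-backed, anonymous, or the s390/KVM
   variant), e.g. to replace a page lost to a hardware memory error. */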
2643void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2644{
2645 RAMBlock *block;
2646 ram_addr_t offset;
2647 int flags;
2648 void *area, *vaddr;
2649
2650 QLIST_FOREACH(block, &ram_list.blocks, next) {
2651 offset = addr - block->offset;
2652 if (offset < block->length) {
2653 vaddr = block->host + offset;
2654 if (block->flags & RAM_PREALLOC_MASK) {
2655 ;
2656 } else {
2657 flags = MAP_FIXED;
2658 munmap(vaddr, length);
2659 if (mem_path) {
2660#if defined(__linux__) && !defined(TARGET_S390X)
2661 if (block->fd) {
2662#ifdef MAP_POPULATE
2663 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2664 MAP_PRIVATE;
2665#else
2666 flags |= MAP_PRIVATE;
2667#endif
2668 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2669 flags, block->fd, offset);
2670 } else {
2671 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2672 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2673 flags, -1, 0);
2674 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002675#else
2676 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01002677#endif
2678 } else {
2679#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2680 flags |= MAP_SHARED | MAP_ANONYMOUS;
2681 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2682 flags, -1, 0);
2683#else
2684 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2685 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2686 flags, -1, 0);
2687#endif
2688 }
2689 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002690 fprintf(stderr, "Could not remap addr: "
2691 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01002692 length, addr);
2693 exit(1);
2694 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002695 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04002696 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002697 }
2698 return;
2699 }
2700 }
2701}
2702#endif /* !_WIN32 */
2703
pbrookdc828ca2009-04-09 22:21:07 +00002704/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002705 With the exception of the softmmu code in this file, this should
2706 only be used for local memory (e.g. video ram) that the device owns,
2707 and knows it isn't going to access beyond the end of the block.
2708
2709 It should not be used for general purpose DMA.
2710 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2711 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002712void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002713{
pbrook94a6b542009-04-11 17:15:54 +00002714 RAMBlock *block;
2715
Alex Williamsonf471a172010-06-11 11:11:42 -06002716 QLIST_FOREACH(block, &ram_list.blocks, next) {
2717 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05002718            /* Move this entry to the start of the list.  */
2719 if (block != QLIST_FIRST(&ram_list.blocks)) {
2720 QLIST_REMOVE(block, next);
2721 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2722 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002723 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002724            /* We need to check if the requested address is in RAM
2725             * because we don't want to map the entire guest memory in QEMU;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002726             * in that case, just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002727 */
2728 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002729 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002730 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002731 block->host =
2732 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002733 }
2734 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002735 return block->host + (addr - block->offset);
2736 }
pbrook94a6b542009-04-11 17:15:54 +00002737 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002738
2739 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2740 abort();
2741
2742 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00002743}
2744
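/* Illustrative sketch, not part of the build: how a device that owns its
 * RAM block (e.g. video RAM) would touch it via qemu_get_ram_ptr().  The
 * offset/size pair is hypothetical; the only assumption is that the
 * caller allocated the block itself and stays inside its length.  */
static void example_clear_vram(ram_addr_t vram_offset, ram_addr_t vram_size)
{
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);

    /* Safe only because the device owns the block and stays in bounds. */
    memset(vram, 0, vram_size);
    qemu_put_ram_ptr(vram);
}
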
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002745/* Return a host pointer to ram allocated with qemu_ram_alloc.
2746 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2747 */
2748void *qemu_safe_ram_ptr(ram_addr_t addr)
2749{
2750 RAMBlock *block;
2751
2752 QLIST_FOREACH(block, &ram_list.blocks, next) {
2753 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02002754 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002755            /* We need to check if the requested address is in RAM
2756             * because we don't want to map the entire guest memory in QEMU;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002757             * in that case, just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002758 */
2759 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002760 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002761 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002762 block->host =
2763 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002764 }
2765 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002766 return block->host + (addr - block->offset);
2767 }
2768 }
2769
2770 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2771 abort();
2772
2773 return NULL;
2774}
2775
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002776/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
2777 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002778void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002779{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002780 if (*size == 0) {
2781 return NULL;
2782 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002783 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002784 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02002785 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002786 RAMBlock *block;
2787
2788 QLIST_FOREACH(block, &ram_list.blocks, next) {
2789 if (addr - block->offset < block->length) {
2790 if (addr - block->offset + *size > block->length)
2791 *size = block->length - addr + block->offset;
2792 return block->host + (addr - block->offset);
2793 }
2794 }
2795
2796 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2797 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002798 }
2799}
2800
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002801void qemu_put_ram_ptr(void *addr)
2802{
2803 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002804}
2805
Marcelo Tosattie8902612010-10-11 15:31:19 -03002806int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00002807{
pbrook94a6b542009-04-11 17:15:54 +00002808 RAMBlock *block;
2809 uint8_t *host = ptr;
2810
Jan Kiszka868bb332011-06-21 22:59:09 +02002811 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002812 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002813 return 0;
2814 }
2815
Alex Williamsonf471a172010-06-11 11:11:42 -06002816 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002817        /* This case occurs when the block is not mapped. */
2818 if (block->host == NULL) {
2819 continue;
2820 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002821 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002822 *ram_addr = block->offset + (host - block->host);
2823 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06002824 }
pbrook94a6b542009-04-11 17:15:54 +00002825 }
Jun Nakajima432d2682010-08-31 16:41:25 +01002826
Marcelo Tosattie8902612010-10-11 15:31:19 -03002827 return -1;
2828}
Alex Williamsonf471a172010-06-11 11:11:42 -06002829
Marcelo Tosattie8902612010-10-11 15:31:19 -03002830/* Some of the softmmu routines need to translate from a host pointer
2831 (typically a TLB entry) back to a ram offset. */
2832ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2833{
2834 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06002835
Marcelo Tosattie8902612010-10-11 15:31:19 -03002836 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2837 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2838 abort();
2839 }
2840 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002841}
2842
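/* Illustrative sketch, not part of the build: the host-pointer mapping is
 * a round trip, which is what the softmmu code relies on when it turns a
 * TLB host address back into a ram_addr_t.  */
static void example_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);

    assert(qemu_ram_addr_from_host_nofail(host) == addr);
}
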
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002843static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
2844 unsigned size)
bellard33417e72003-08-10 21:47:01 +00002845{
pbrook67d3b952006-12-18 05:03:52 +00002846#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002847 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00002848#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002849#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002850 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002851#endif
2852 return 0;
2853}
2854
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002855static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
2856 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00002857{
2858#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002859 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00002860#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002861#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002862 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002863#endif
2864}
2865
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002866static const MemoryRegionOps unassigned_mem_ops = {
2867 .read = unassigned_mem_read,
2868 .write = unassigned_mem_write,
2869 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002870};
2871
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002872static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
2873 unsigned size)
2874{
2875 abort();
2876}
2877
2878static void error_mem_write(void *opaque, target_phys_addr_t addr,
2879 uint64_t value, unsigned size)
2880{
2881 abort();
2882}
2883
2884static const MemoryRegionOps error_mem_ops = {
2885 .read = error_mem_read,
2886 .write = error_mem_write,
2887 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002888};
2889
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002890static const MemoryRegionOps rom_mem_ops = {
2891 .read = error_mem_read,
2892 .write = unassigned_mem_write,
2893 .endianness = DEVICE_NATIVE_ENDIAN,
2894};
2895
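/* Pages containing translated code are mapped through this handler: a
   guest write first invalidates any TBs for the page, then performs the
   store and marks the page dirty.  Once no translated code is left
   (dirty_flags == 0xff), the slow path is retired via tlb_set_dirty(). */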
2896static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
2897 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002898{
bellard3a7d9292005-08-21 09:26:42 +00002899 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002900 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002901 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2902#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002903 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002904 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002905#endif
2906 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002907 switch (size) {
2908 case 1:
2909 stb_p(qemu_get_ram_ptr(ram_addr), val);
2910 break;
2911 case 2:
2912 stw_p(qemu_get_ram_ptr(ram_addr), val);
2913 break;
2914 case 4:
2915 stl_p(qemu_get_ram_ptr(ram_addr), val);
2916 break;
2917 default:
2918 abort();
2919 }
bellardf23db162005-08-21 19:12:28 +00002920 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002921 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002922 /* we remove the notdirty callback only if the code has been
2923 flushed */
2924 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002925 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002926}
2927
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002928static const MemoryRegionOps notdirty_mem_ops = {
2929 .read = error_mem_read,
2930 .write = notdirty_mem_write,
2931 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002932};
2933
pbrook0f459d12008-06-09 00:20:13 +00002934/* Generate a debug exception if a watchpoint has been hit. */
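/* Both masks are checked so a hit is reported whether the access covers
   the watchpoint or the watchpoint covers the access: len_mask here is
   ~(access_size - 1), wp->len_mask is ~(watch_size - 1). */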
aliguorib4051332008-11-18 20:14:20 +00002935static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002936{
Andreas Färber9349b4f2012-03-14 01:38:32 +01002937 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00002938 target_ulong pc, cs_base;
2939 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00002940 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002941 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002942 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002943
aliguori06d55cc2008-11-18 20:24:06 +00002944 if (env->watchpoint_hit) {
2945 /* We re-entered the check after replacing the TB. Now raise
2946         * the debug interrupt so that it will trigger after the
2947 * current instruction. */
2948 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2949 return;
2950 }
pbrook2e70f6e2008-06-29 01:03:05 +00002951 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002952 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00002953 if ((vaddr == (wp->vaddr & len_mask) ||
2954 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00002955 wp->flags |= BP_WATCHPOINT_HIT;
2956 if (!env->watchpoint_hit) {
2957 env->watchpoint_hit = wp;
2958 tb = tb_find_pc(env->mem_io_pc);
2959 if (!tb) {
2960 cpu_abort(env, "check_watchpoint: could not find TB for "
2961 "pc=%p", (void *)env->mem_io_pc);
2962 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00002963 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00002964 tb_phys_invalidate(tb, -1);
2965 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2966 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04002967 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00002968 } else {
2969 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2970 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04002971 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002972 }
aliguori06d55cc2008-11-18 20:24:06 +00002973 }
aliguori6e140f22008-11-18 20:37:55 +00002974 } else {
2975 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002976 }
2977 }
2978}
2979
pbrook6658ffb2007-03-16 23:58:11 +00002980/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2981 so these check for a hit then pass through to the normal out-of-line
2982 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02002983static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
2984 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00002985{
Avi Kivity1ec9b902012-01-02 12:47:48 +02002986 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
2987 switch (size) {
2988 case 1: return ldub_phys(addr);
2989 case 2: return lduw_phys(addr);
2990 case 4: return ldl_phys(addr);
2991 default: abort();
2992 }
pbrook6658ffb2007-03-16 23:58:11 +00002993}
2994
Avi Kivity1ec9b902012-01-02 12:47:48 +02002995static void watch_mem_write(void *opaque, target_phys_addr_t addr,
2996 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00002997{
Avi Kivity1ec9b902012-01-02 12:47:48 +02002998 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
2999 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04003000 case 1:
3001 stb_phys(addr, val);
3002 break;
3003 case 2:
3004 stw_phys(addr, val);
3005 break;
3006 case 4:
3007 stl_phys(addr, val);
3008 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02003009 default: abort();
3010 }
pbrook6658ffb2007-03-16 23:58:11 +00003011}
3012
Avi Kivity1ec9b902012-01-02 12:47:48 +02003013static const MemoryRegionOps watch_mem_ops = {
3014 .read = watch_mem_read,
3015 .write = watch_mem_write,
3016 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003017};
pbrook6658ffb2007-03-16 23:58:11 +00003018
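/* Subpage dispatch: when one page is shared by several memory regions
   (region boundaries not aligned to TARGET_PAGE_SIZE), a subpage_t fronts
   the whole page and forwards each access to the MemoryRegionSection
   registered for its offset within the page. */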
Avi Kivity70c68e42012-01-02 12:32:48 +02003019static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3020 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003021{
Avi Kivity70c68e42012-01-02 12:32:48 +02003022 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003023 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003024 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003025#if defined(DEBUG_SUBPAGE)
3026 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3027 mmio, len, addr, idx);
3028#endif
blueswir1db7b5422007-05-26 17:36:03 +00003029
Avi Kivity5312bd82012-02-12 18:32:55 +02003030 section = &phys_sections[mmio->sub_section[idx]];
3031 addr += mmio->base;
3032 addr -= section->offset_within_address_space;
3033 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003034 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003035}
3036
Avi Kivity70c68e42012-01-02 12:32:48 +02003037static void subpage_write(void *opaque, target_phys_addr_t addr,
3038 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003039{
Avi Kivity70c68e42012-01-02 12:32:48 +02003040 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003041 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003042 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003043#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003044 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3045 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003046 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003047#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003048
Avi Kivity5312bd82012-02-12 18:32:55 +02003049 section = &phys_sections[mmio->sub_section[idx]];
3050 addr += mmio->base;
3051 addr -= section->offset_within_address_space;
3052 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003053 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003054}
3055
Avi Kivity70c68e42012-01-02 12:32:48 +02003056static const MemoryRegionOps subpage_ops = {
3057 .read = subpage_read,
3058 .write = subpage_write,
3059 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003060};
3061
Avi Kivityde712f92012-01-02 12:41:07 +02003062static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3063 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003064{
3065 ram_addr_t raddr = addr;
3066 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003067 switch (size) {
3068 case 1: return ldub_p(ptr);
3069 case 2: return lduw_p(ptr);
3070 case 4: return ldl_p(ptr);
3071 default: abort();
3072 }
Andreas Färber56384e82011-11-30 16:26:21 +01003073}
3074
Avi Kivityde712f92012-01-02 12:41:07 +02003075static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3076 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003077{
3078 ram_addr_t raddr = addr;
3079 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003080 switch (size) {
3081 case 1: return stb_p(ptr, value);
3082 case 2: return stw_p(ptr, value);
3083 case 4: return stl_p(ptr, value);
3084 default: abort();
3085 }
Andreas Färber56384e82011-11-30 16:26:21 +01003086}
3087
Avi Kivityde712f92012-01-02 12:41:07 +02003088static const MemoryRegionOps subpage_ram_ops = {
3089 .read = subpage_ram_read,
3090 .write = subpage_ram_write,
3091 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003092};
3093
Anthony Liguoric227f092009-10-01 16:12:16 -05003094static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003095 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003096{
3097 int idx, eidx;
3098
3099 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3100 return -1;
3101 idx = SUBPAGE_IDX(start);
3102 eidx = SUBPAGE_IDX(end);
3103#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003104    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003105           mmio, start, end, idx, eidx, section);
3106#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003107 if (memory_region_is_ram(phys_sections[section].mr)) {
3108 MemoryRegionSection new_section = phys_sections[section];
3109 new_section.mr = &io_mem_subpage_ram;
3110 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003111 }
blueswir1db7b5422007-05-26 17:36:03 +00003112 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003113 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003114 }
3115
3116 return 0;
3117}
3118
Avi Kivity0f0cb162012-02-13 17:14:32 +02003119static subpage_t *subpage_init(target_phys_addr_t base)
blueswir1db7b5422007-05-26 17:36:03 +00003120{
Anthony Liguoric227f092009-10-01 16:12:16 -05003121 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003122
Anthony Liguori7267c092011-08-20 22:09:37 -05003123 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003124
3125 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003126 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3127 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003128 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00003129#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003130    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3131           mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003132#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02003133 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00003134
3135 return mmio;
3136}
3137
Avi Kivity5312bd82012-02-12 18:32:55 +02003138static uint16_t dummy_section(MemoryRegion *mr)
3139{
3140 MemoryRegionSection section = {
3141 .mr = mr,
3142 .offset_within_address_space = 0,
3143 .offset_within_region = 0,
3144 .size = UINT64_MAX,
3145 };
3146
3147 return phys_section_add(&section);
3148}
3149
Avi Kivity37ec01d2012-03-08 18:08:35 +02003150MemoryRegion *iotlb_to_region(target_phys_addr_t index)
Avi Kivityaa102232012-03-08 17:06:55 +02003151{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003152 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02003153}
3154
Avi Kivitye9179ce2009-06-14 11:38:52 +03003155static void io_mem_init(void)
3156{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003157 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003158 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3159 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3160 "unassigned", UINT64_MAX);
3161 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3162 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003163 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3164 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02003165 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3166 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003167}
3168
Avi Kivity50c1e142012-02-08 21:36:02 +02003169static void core_begin(MemoryListener *listener)
3170{
Avi Kivity54688b12012-02-09 17:34:32 +02003171 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003172 phys_sections_clear();
Avi Kivityc19e8802012-02-13 20:25:31 +02003173 phys_map.ptr = PHYS_MAP_NODE_NIL;
Avi Kivity5312bd82012-02-12 18:32:55 +02003174 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02003175 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3176 phys_section_rom = dummy_section(&io_mem_rom);
3177 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02003178}
3179
3180static void core_commit(MemoryListener *listener)
3181{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003182 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02003183
3184 /* since each CPU stores ram addresses in its TLB cache, we must
3185 reset the modified entries */
3186 /* XXX: slow ! */
3187 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3188 tlb_flush(env, 1);
3189 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003190}
3191
Avi Kivity93632742012-02-08 16:54:16 +02003192static void core_region_add(MemoryListener *listener,
3193 MemoryRegionSection *section)
3194{
Avi Kivity4855d412012-02-08 21:16:05 +02003195 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003196}
3197
3198static void core_region_del(MemoryListener *listener,
3199 MemoryRegionSection *section)
3200{
Avi Kivity93632742012-02-08 16:54:16 +02003201}
3202
Avi Kivity50c1e142012-02-08 21:36:02 +02003203static void core_region_nop(MemoryListener *listener,
3204 MemoryRegionSection *section)
3205{
Avi Kivity54688b12012-02-09 17:34:32 +02003206 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003207}
3208
Avi Kivity93632742012-02-08 16:54:16 +02003209static void core_log_start(MemoryListener *listener,
3210 MemoryRegionSection *section)
3211{
3212}
3213
3214static void core_log_stop(MemoryListener *listener,
3215 MemoryRegionSection *section)
3216{
3217}
3218
3219static void core_log_sync(MemoryListener *listener,
3220 MemoryRegionSection *section)
3221{
3222}
3223
3224static void core_log_global_start(MemoryListener *listener)
3225{
3226 cpu_physical_memory_set_dirty_tracking(1);
3227}
3228
3229static void core_log_global_stop(MemoryListener *listener)
3230{
3231 cpu_physical_memory_set_dirty_tracking(0);
3232}
3233
3234static void core_eventfd_add(MemoryListener *listener,
3235 MemoryRegionSection *section,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02003236 bool match_data, uint64_t data, EventNotifier *e)
Avi Kivity93632742012-02-08 16:54:16 +02003237{
3238}
3239
3240static void core_eventfd_del(MemoryListener *listener,
3241 MemoryRegionSection *section,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02003242 bool match_data, uint64_t data, EventNotifier *e)
Avi Kivity93632742012-02-08 16:54:16 +02003243{
3244}
3245
Avi Kivity50c1e142012-02-08 21:36:02 +02003246static void io_begin(MemoryListener *listener)
3247{
3248}
3249
3250static void io_commit(MemoryListener *listener)
3251{
3252}
3253
Avi Kivity4855d412012-02-08 21:16:05 +02003254static void io_region_add(MemoryListener *listener,
3255 MemoryRegionSection *section)
3256{
Avi Kivitya2d33522012-03-05 17:40:12 +02003257 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3258
3259 mrio->mr = section->mr;
3260 mrio->offset = section->offset_within_region;
3261 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02003262 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02003263 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02003264}
3265
3266static void io_region_del(MemoryListener *listener,
3267 MemoryRegionSection *section)
3268{
3269 isa_unassign_ioport(section->offset_within_address_space, section->size);
3270}
3271
Avi Kivity50c1e142012-02-08 21:36:02 +02003272static void io_region_nop(MemoryListener *listener,
3273 MemoryRegionSection *section)
3274{
3275}
3276
Avi Kivity4855d412012-02-08 21:16:05 +02003277static void io_log_start(MemoryListener *listener,
3278 MemoryRegionSection *section)
3279{
3280}
3281
3282static void io_log_stop(MemoryListener *listener,
3283 MemoryRegionSection *section)
3284{
3285}
3286
3287static void io_log_sync(MemoryListener *listener,
3288 MemoryRegionSection *section)
3289{
3290}
3291
3292static void io_log_global_start(MemoryListener *listener)
3293{
3294}
3295
3296static void io_log_global_stop(MemoryListener *listener)
3297{
3298}
3299
3300static void io_eventfd_add(MemoryListener *listener,
3301 MemoryRegionSection *section,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02003302 bool match_data, uint64_t data, EventNotifier *e)
Avi Kivity4855d412012-02-08 21:16:05 +02003303{
3304}
3305
3306static void io_eventfd_del(MemoryListener *listener,
3307 MemoryRegionSection *section,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02003308 bool match_data, uint64_t data, EventNotifier *e)
Avi Kivity4855d412012-02-08 21:16:05 +02003309{
3310}
3311
Avi Kivity93632742012-02-08 16:54:16 +02003312static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003313 .begin = core_begin,
3314 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003315 .region_add = core_region_add,
3316 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003317 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003318 .log_start = core_log_start,
3319 .log_stop = core_log_stop,
3320 .log_sync = core_log_sync,
3321 .log_global_start = core_log_global_start,
3322 .log_global_stop = core_log_global_stop,
3323 .eventfd_add = core_eventfd_add,
3324 .eventfd_del = core_eventfd_del,
3325 .priority = 0,
3326};
3327
Avi Kivity4855d412012-02-08 21:16:05 +02003328static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003329 .begin = io_begin,
3330 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003331 .region_add = io_region_add,
3332 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003333 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003334 .log_start = io_log_start,
3335 .log_stop = io_log_stop,
3336 .log_sync = io_log_sync,
3337 .log_global_start = io_log_global_start,
3338 .log_global_stop = io_log_global_stop,
3339 .eventfd_add = io_eventfd_add,
3340 .eventfd_del = io_eventfd_del,
3341 .priority = 0,
3342};
3343
Avi Kivity62152b82011-07-26 14:26:14 +03003344static void memory_map_init(void)
3345{
Anthony Liguori7267c092011-08-20 22:09:37 -05003346 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003347 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003348 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003349
Anthony Liguori7267c092011-08-20 22:09:37 -05003350 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003351 memory_region_init(system_io, "io", 65536);
3352 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003353
Avi Kivity4855d412012-02-08 21:16:05 +02003354 memory_listener_register(&core_memory_listener, system_memory);
3355 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003356}
3357
3358MemoryRegion *get_system_memory(void)
3359{
3360 return system_memory;
3361}
3362
Avi Kivity309cb472011-08-08 16:09:03 +03003363MemoryRegion *get_system_io(void)
3364{
3365 return system_io;
3366}
3367
pbrooke2eef172008-06-08 01:09:01 +00003368#endif /* !defined(CONFIG_USER_ONLY) */
3369
bellard13eb76e2004-01-24 15:23:36 +00003370/* physical memory access (slow version, mainly for debug) */
3371#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01003372int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00003373 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003374{
3375 int l, flags;
3376 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003377 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003378
3379 while (len > 0) {
3380 page = addr & TARGET_PAGE_MASK;
3381 l = (page + TARGET_PAGE_SIZE) - addr;
3382 if (l > len)
3383 l = len;
3384 flags = page_get_flags(page);
3385 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003386 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003387 if (is_write) {
3388 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003389 return -1;
bellard579a97f2007-11-11 14:26:47 +00003390 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003391 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003392 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003393 memcpy(p, buf, l);
3394 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003395 } else {
3396 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003397 return -1;
bellard579a97f2007-11-11 14:26:47 +00003398 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003399 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003400 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003401 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003402 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003403 }
3404 len -= l;
3405 buf += l;
3406 addr += l;
3407 }
Paul Brooka68fe892010-03-01 00:08:59 +00003408 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003409}
bellard8df1cd02005-01-28 22:37:22 +00003410
bellard13eb76e2004-01-24 15:23:36 +00003411#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003412
3413static void invalidate_and_set_dirty(target_phys_addr_t addr,
3414 target_phys_addr_t length)
3415{
3416 if (!cpu_physical_memory_is_dirty(addr)) {
3417 /* invalidate code */
3418 tb_invalidate_phys_page_range(addr, addr + length, 0);
3419 /* set dirty bit */
3420 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
3421 }
Anthony PERARDe2269392012-10-03 13:49:22 +00003422 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003423}
3424
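/* Copy @len bytes between guest-physical memory and @buf, splitting the
   range at page boundaries.  RAM pages are accessed through
   qemu_get_ram_ptr() (invalidating translated code on writes); everything
   else goes through io_mem_read()/io_mem_write() in the widest naturally
   aligned units (4, 2, then 1 byte). */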
Anthony Liguoric227f092009-10-01 16:12:16 -05003425void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003426 int len, int is_write)
3427{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003428 int l;
bellard13eb76e2004-01-24 15:23:36 +00003429 uint8_t *ptr;
3430 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003431 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003432 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003433
bellard13eb76e2004-01-24 15:23:36 +00003434 while (len > 0) {
3435 page = addr & TARGET_PAGE_MASK;
3436 l = (page + TARGET_PAGE_SIZE) - addr;
3437 if (l > len)
3438 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003439 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003440
bellard13eb76e2004-01-24 15:23:36 +00003441 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003442 if (!memory_region_is_ram(section->mr)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003443 target_phys_addr_t addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00003444 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00003445 /* XXX: could force cpu_single_env to NULL to avoid
3446 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003447 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003448 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003449 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003450 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003451 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003452 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003453 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003454 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003455 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003456 l = 2;
3457 } else {
bellard1c213d12005-09-03 10:49:04 +00003458 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003459 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003460 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003461 l = 1;
3462 }
Avi Kivityf3705d52012-03-08 16:16:34 +02003463 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003464 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003465 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003466 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00003467 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003468 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003469 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003470 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003471 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003472 }
3473 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003474 if (!(memory_region_is_ram(section->mr) ||
3475 memory_region_is_romd(section->mr))) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003476 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003477 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003478 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00003479 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003480 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003481 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003482 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003483 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003484 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003485 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003486 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003487 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003488 l = 2;
3489 } else {
bellard1c213d12005-09-03 10:49:04 +00003490 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003491 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003492 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003493 l = 1;
3494 }
3495 } else {
3496 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00003497 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00003498 + memory_region_section_addr(section,
3499 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02003500 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003501 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003502 }
3503 }
3504 len -= l;
3505 buf += l;
3506 addr += l;
3507 }
3508}
bellard8df1cd02005-01-28 22:37:22 +00003509
bellardd0ecd2a2006-04-23 17:14:48 +00003510/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003511void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003512 const uint8_t *buf, int len)
3513{
3514 int l;
3515 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003516 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003517 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003518
bellardd0ecd2a2006-04-23 17:14:48 +00003519 while (len > 0) {
3520 page = addr & TARGET_PAGE_MASK;
3521 l = (page + TARGET_PAGE_SIZE) - addr;
3522 if (l > len)
3523 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003524 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003525
Blue Swirlcc5bea62012-04-14 14:56:48 +00003526 if (!(memory_region_is_ram(section->mr) ||
3527 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00003528 /* do nothing */
3529 } else {
3530 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003531 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003532 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00003533 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003534 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003535 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003536 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003537 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003538 }
3539 len -= l;
3540 buf += l;
3541 addr += l;
3542 }
3543}
3544
aliguori6d16c2f2009-01-22 16:59:11 +00003545typedef struct {
3546 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003547 target_phys_addr_t addr;
3548 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003549} BounceBuffer;
3550
3551static BounceBuffer bounce;
3552
aliguoriba223c22009-01-22 16:59:16 +00003553typedef struct MapClient {
3554 void *opaque;
3555 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003556 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003557} MapClient;
3558
Blue Swirl72cf2d42009-09-12 07:36:22 +00003559static QLIST_HEAD(map_client_list, MapClient) map_client_list
3560 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003561
3562void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3563{
Anthony Liguori7267c092011-08-20 22:09:37 -05003564 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003565
3566 client->opaque = opaque;
3567 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003568 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003569 return client;
3570}
3571
3572void cpu_unregister_map_client(void *_client)
3573{
3574 MapClient *client = (MapClient *)_client;
3575
Blue Swirl72cf2d42009-09-12 07:36:22 +00003576 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003577 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003578}
3579
3580static void cpu_notify_map_clients(void)
3581{
3582 MapClient *client;
3583
Blue Swirl72cf2d42009-09-12 07:36:22 +00003584 while (!QLIST_EMPTY(&map_client_list)) {
3585 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003586 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003587 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003588 }
3589}
3590
aliguori6d16c2f2009-01-22 16:59:11 +00003591/* Map a physical memory region into a host virtual address.
3592 * May map a subset of the requested range, given by and returned in *plen.
3593 * May return NULL if resources needed to perform the mapping are exhausted.
3594 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003595 * Use cpu_register_map_client() to know when retrying the map operation is
3596 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003597 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003598void *cpu_physical_memory_map(target_phys_addr_t addr,
3599 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003600 int is_write)
3601{
Anthony Liguoric227f092009-10-01 16:12:16 -05003602 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003603 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003604 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003605 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003606 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003607 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003608 ram_addr_t rlen;
3609 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003610
3611 while (len > 0) {
3612 page = addr & TARGET_PAGE_MASK;
3613 l = (page + TARGET_PAGE_SIZE) - addr;
3614 if (l > len)
3615 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003616 section = phys_page_find(page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00003617
Avi Kivityf3705d52012-03-08 16:16:34 +02003618 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003619 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003620 break;
3621 }
3622 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3623 bounce.addr = addr;
3624 bounce.len = l;
3625 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02003626 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003627 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003628
3629 *plen = l;
3630 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00003631 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003632 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003633 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003634 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003635 }
aliguori6d16c2f2009-01-22 16:59:11 +00003636
3637 len -= l;
3638 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003639 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00003640 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003641 rlen = todo;
3642 ret = qemu_ram_ptr_length(raddr, &rlen);
3643 *plen = rlen;
3644 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003645}
3646
3647/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3648 * Will also mark the memory as dirty if is_write == 1. access_len gives
3649 * the amount of memory that was actually read or written by the caller.
3650 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003651void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3652 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003653{
3654 if (buffer != bounce.buffer) {
3655 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003656 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003657 while (access_len) {
3658 unsigned l;
3659 l = TARGET_PAGE_SIZE;
3660 if (l > access_len)
3661 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003662 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003663 addr1 += l;
3664 access_len -= l;
3665 }
3666 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003667 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003668 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003669 }
aliguori6d16c2f2009-01-22 16:59:11 +00003670 return;
3671 }
3672 if (is_write) {
3673 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3674 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003675 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003676 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00003677 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003678}
bellardd0ecd2a2006-04-23 17:14:48 +00003679
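/* Illustrative sketch, not part of the build: the canonical map/unmap
 * pattern for zero-copy access to guest memory.  The map may cover less
 * than requested (or return NULL while the bounce buffer is busy), so
 * callers must loop; a real user would register a map client to retry on
 * NULL.  The 0xff fill is a stand-in for actual device data.  */
static void example_dma_fill(target_phys_addr_t addr, target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *buf = cpu_physical_memory_map(addr, &plen, 1);

        if (!buf) {
            break; /* see cpu_register_map_client() above */
        }
        memset(buf, 0xff, plen);
        cpu_physical_memory_unmap(buf, plen, 1, plen);
        addr += plen;
        len -= plen;
    }
}
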
bellard8df1cd02005-01-28 22:37:22 +00003680/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003681static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3682 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003683{
bellard8df1cd02005-01-28 22:37:22 +00003684 uint8_t *ptr;
3685 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003686 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003687
Avi Kivity06ef3522012-02-13 16:11:22 +02003688 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003689
Blue Swirlcc5bea62012-04-14 14:56:48 +00003690 if (!(memory_region_is_ram(section->mr) ||
3691 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00003692 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003693 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003694 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003695#if defined(TARGET_WORDS_BIGENDIAN)
3696 if (endian == DEVICE_LITTLE_ENDIAN) {
3697 val = bswap32(val);
3698 }
3699#else
3700 if (endian == DEVICE_BIG_ENDIAN) {
3701 val = bswap32(val);
3702 }
3703#endif
bellard8df1cd02005-01-28 22:37:22 +00003704 } else {
3705 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003706 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003707 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003708 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003709 switch (endian) {
3710 case DEVICE_LITTLE_ENDIAN:
3711 val = ldl_le_p(ptr);
3712 break;
3713 case DEVICE_BIG_ENDIAN:
3714 val = ldl_be_p(ptr);
3715 break;
3716 default:
3717 val = ldl_p(ptr);
3718 break;
3719 }
bellard8df1cd02005-01-28 22:37:22 +00003720 }
3721 return val;
3722}
3723
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003724uint32_t ldl_phys(target_phys_addr_t addr)
3725{
3726 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3727}
3728
3729uint32_t ldl_le_phys(target_phys_addr_t addr)
3730{
3731 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3732}
3733
3734uint32_t ldl_be_phys(target_phys_addr_t addr)
3735{
3736 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3737}
3738
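/* Example (illustrative): a device register documented as little-endian
 * should be read through the explicit variant so the result is the same
 * regardless of TARGET_WORDS_BIGENDIAN:
 *
 *     uint32_t val = ldl_le_phys(reg_addr);  (reg_addr is hypothetical)
 */
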
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case: read the two 32-bit halves in guest-native order, then
           byte-swap the combined value if the caller asked for the opposite
           endianness, mirroring the 32-bit path above.  */
        addr = memory_region_section_addr(section, addr);
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
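
/*
 * Illustrative note: the _notdirty stores exist for targets that use the
 * physical-memory dirty bits to emulate hardware-set PTE flags.  A target
 * MMU helper updating a page table entry writes with the _notdirty variant
 * so the store itself does not set the dirty bit it is trying to track,
 * e.g. (pte_addr, pte and PG_ACCESSED_MASK stand in for target-specific
 * values; the x86 MMU code uses this pattern for accessed/dirty bits):
 *
 *     stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
 */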

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

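/*
 * Usage sketch (illustrative only): the explicit-endian stores mirror the
 * loads above.  For instance, a device model completing a hypothetical DMA
 * descriptor defined as little-endian in the device spec would write its
 * status word with
 *
 *     stl_le_phys(desc_addr + 4, DESC_DONE);  // desc_addr, DESC_DONE made up
 *
 * regardless of whether the target CPU is big- or little-endian.
 */
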
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

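/*
 * Design note: unlike the 16/32-bit helpers, the 64-bit stores above simply
 * byte-swap into a stack variable and go through cpu_physical_memory_write(),
 * which handles the RAM versus MMIO dispatch and dirty marking for them.
 * That is why they carry the "XXX: optimize" tag: the price is a byte-wise
 * path instead of a single 64-bit access.
 */
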
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
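
/*
 * Usage sketch (illustrative only): this is the routine behind gdbstub
 * memory accesses, which operate on guest-virtual addresses.  Reading a
 * guest pointer for inspection would look like (guest_vaddr is a made-up
 * example address):
 *
 *     uint32_t word;
 *     if (cpu_memory_rw_debug(env, guest_vaddr, (uint8_t *)&word,
 *                             sizeof(word), 0) < 0) {
 *         // page not mapped in the guest MMU
 *     }
 *
 * The result is in guest memory byte order; callers byte-swap as needed.
 */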
#endif

/* In deterministic execution mode, instructions doing device I/O
   must be at the end of the TB. */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(env, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
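
/*
 * Note: this path is only reached in icount mode.  A memory helper that hits
 * device I/O in the middle of a TB calls here with the host return address;
 * the TB is regenerated with CF_LAST_IO so that the I/O instruction
 * terminates it, which keeps the instruction counter deterministic.
 */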

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ?
                (double)(code_gen_ptr - code_gen_buffer) / target_code_size
                : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
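
/* This report backs the monitor's "info jit" command. */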

/*
 * A helper function for the _utterly broken_ virtio device model to find out
 * if it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
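
/*
 * Usage sketch (illustrative only): callers such as the guest-memory dump
 * code use this to skip device regions when walking physical memory:
 *
 *     if (!cpu_physical_memory_is_io(paddr)) {
 *         // safe to copy TARGET_PAGE_SIZE bytes of RAM/ROM from paddr
 *     }
 *
 * paddr here is a made-up loop variable, not a QEMU symbol.
 */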
#endif