/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section \
    __attribute__((__section__(".gen_code"))) \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section \
    __attribute__((aligned (16)))
#else
#define code_gen_section \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static size_t code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static size_t code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
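/* Illustrative example: for a 32-bit user-mode guest with 4K pages
   (L1_MAP_ADDR_SPACE_BITS = 32, TARGET_PAGE_BITS = 12), 20 index bits
   remain; V_L1_BITS_REM = 20 % 10 = 0, which is < 4, so V_L1_BITS
   widens to 10 and V_L1_SHIFT is 10: a 1024-entry l1_map whose slots
   each point to a 1024-entry bottom level of PageDesc.  */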

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
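/* Each PhysPageEntry thus fits in 16 bits: a leaf holds an index into
   phys_sections, an interior node holds an index into phys_map_nodes,
   and PHYS_MAP_NODE_NIL (the largest 15-bit value) marks a subtree
   that has not been allocated yet, as in the empty root above.  */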

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
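/* Usage sketch: page_find_alloc(addr >> TARGET_PAGE_BITS, 1) walks the
   V_L1_SHIFT / L2_BITS table levels below l1_map, allocating 1024-entry
   tables on demand, and returns the PageDesc slot for that page; with
   alloc == 0 it instead returns NULL as soon as a level is missing.  */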

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
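/* Example: phys_page_set(base >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
   section_index) marks that page range as belonging to
   phys_sections[section_index].  A run that covers a whole aligned
   subtree (step = 1 << (level * L2_BITS) pages) is recorded as a single
   leaf at that level instead of being expanded page by page.  */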

MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
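/* Note that phys_page_find() never returns NULL: pages without a
   mapping resolve to &phys_sections[phys_section_unassigned].  */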

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change once a dedicated libc is used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
     || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
     || defined(__DragonFly__) || defined(__OpenBSD__) \
     || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    code_gen_buffer_size = tb_size;
    return tb_size;
}
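/* Example: in a softmmu build (no USE_STATIC_CODE_GEN_BUFFER), a
   tb_size of 0 with 1GB of guest RAM requests a 256MB buffer, which is
   then clamped to the [MIN, MAX]_CODE_GEN_BUFFER_SIZE range above.  */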

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (code_gen_buffer_size > 800u * 1024 * 1024) {
        code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(code_gen_buffer_size);
    if (buf) {
        map_exec(buf, code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */

static inline void code_gen_alloc(size_t tb_size)
{
    code_gen_buffer_size = size_code_gen_buffer(tb_size);
    code_gen_buffer = alloc_code_gen_buffer();
    if (code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

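/* TB list pointers carry a tag in their low two bits: 0 or 1 selects
   which of the TB's two physical pages the page_next[] link belongs
   to, and 2 marks the jmp_first sentinel that terminates the circular
   list of incoming jumps.  The masking below strips that tag.  */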
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's tb_jmp_cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
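/* Worked example: set_bits(tab, 3, 7) marks bits [3, 10): the first
   byte is OR'ed with 0xf8 (bits 3-7) and the second with 0x03
   (bits 8-9).  */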
1018
1019static void build_page_bitmap(PageDesc *p)
1020{
1021 int n, tb_start, tb_end;
1022 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00001023
Anthony Liguori7267c092011-08-20 22:09:37 -05001024 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
bellard9fa3e852004-01-04 18:06:42 +00001025
1026 tb = p->first_tb;
1027 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001028 n = (uintptr_t)tb & 3;
1029 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellard9fa3e852004-01-04 18:06:42 +00001030 /* NOTE: this is subtle as a TB may span two physical pages */
1031 if (n == 0) {
1032 /* NOTE: tb_end may be after the end of the page, but
1033 it is not a problem */
1034 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1035 tb_end = tb_start + tb->size;
1036 if (tb_end > TARGET_PAGE_SIZE)
1037 tb_end = TARGET_PAGE_SIZE;
1038 } else {
1039 tb_start = 0;
1040 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1041 }
1042 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1043 tb = tb->page_next[n];
1044 }
1045}
1046
Andreas Färber9349b4f2012-03-14 01:38:32 +01001047TranslationBlock *tb_gen_code(CPUArchState *env,
pbrook2e70f6e2008-06-29 01:03:05 +00001048 target_ulong pc, target_ulong cs_base,
1049 int flags, int cflags)
bellardd720b932004-04-25 17:57:43 +00001050{
1051 TranslationBlock *tb;
1052 uint8_t *tc_ptr;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001053 tb_page_addr_t phys_pc, phys_page2;
1054 target_ulong virt_page2;
bellardd720b932004-04-25 17:57:43 +00001055 int code_gen_size;
1056
Paul Brook41c1b1c2010-03-12 16:54:58 +00001057 phys_pc = get_page_addr_code(env, pc);
bellardc27004e2005-01-03 23:35:10 +00001058 tb = tb_alloc(pc);
bellardd720b932004-04-25 17:57:43 +00001059 if (!tb) {
1060 /* flush must be done */
1061 tb_flush(env);
1062 /* cannot fail at this point */
bellardc27004e2005-01-03 23:35:10 +00001063 tb = tb_alloc(pc);
pbrook2e70f6e2008-06-29 01:03:05 +00001064 /* Don't forget to invalidate previous TB info. */
1065 tb_invalidated_flag = 1;
bellardd720b932004-04-25 17:57:43 +00001066 }
1067 tc_ptr = code_gen_ptr;
1068 tb->tc_ptr = tc_ptr;
1069 tb->cs_base = cs_base;
1070 tb->flags = flags;
1071 tb->cflags = cflags;
blueswir1d07bde82007-12-11 19:35:45 +00001072 cpu_gen_code(env, tb, &code_gen_size);
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001073 code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
1074 CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
ths3b46e622007-09-17 08:09:54 +00001075
bellardd720b932004-04-25 17:57:43 +00001076 /* check next page if needed */
bellardc27004e2005-01-03 23:35:10 +00001077 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
bellardd720b932004-04-25 17:57:43 +00001078 phys_page2 = -1;
bellardc27004e2005-01-03 23:35:10 +00001079 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
Paul Brook41c1b1c2010-03-12 16:54:58 +00001080 phys_page2 = get_page_addr_code(env, virt_page2);
bellardd720b932004-04-25 17:57:43 +00001081 }
Paul Brook41c1b1c2010-03-12 16:54:58 +00001082 tb_link_page(tb, phys_pc, phys_page2);
pbrook2e70f6e2008-06-29 01:03:05 +00001083 return tb;
bellardd720b932004-04-25 17:57:43 +00001084}
ths3b46e622007-09-17 08:09:54 +00001085
Alexander Graf77a8f1a2012-05-10 22:40:10 +00001086/*
Jan Kiszka8e0fdce2012-05-23 23:41:53 -03001087 * Invalidate all TBs which intersect with the target physical address range
1088 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1089 * 'is_cpu_write_access' should be true if called from a real cpu write
1090 * access: the virtual CPU will exit the current TB if code is modified inside
1091 * this TB.
Alexander Graf77a8f1a2012-05-10 22:40:10 +00001092 */
1093void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
1094 int is_cpu_write_access)
1095{
1096 while (start < end) {
1097 tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
1098 start &= TARGET_PAGE_MASK;
1099 start += TARGET_PAGE_SIZE;
1100 }
1101}
1102
Jan Kiszka8e0fdce2012-05-23 23:41:53 -03001103/*
1104 * Invalidate all TBs which intersect with the target physical address range
1105 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1106 * 'is_cpu_write_access' should be true if called from a real cpu write
1107 * access: the virtual CPU will exit the current TB if code is modified inside
1108 * this TB.
1109 */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001110void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
bellardd720b932004-04-25 17:57:43 +00001111 int is_cpu_write_access)
bellard9fa3e852004-01-04 18:06:42 +00001112{
aliguori6b917542008-11-18 19:46:41 +00001113 TranslationBlock *tb, *tb_next, *saved_tb;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001114 CPUArchState *env = cpu_single_env;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001115 tb_page_addr_t tb_start, tb_end;
aliguori6b917542008-11-18 19:46:41 +00001116 PageDesc *p;
1117 int n;
1118#ifdef TARGET_HAS_PRECISE_SMC
1119 int current_tb_not_found = is_cpu_write_access;
1120 TranslationBlock *current_tb = NULL;
1121 int current_tb_modified = 0;
1122 target_ulong current_pc = 0;
1123 target_ulong current_cs_base = 0;
1124 int current_flags = 0;
1125#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001126
1127 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001128 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001129 return;
ths5fafdf22007-09-16 21:08:06 +00001130 if (!p->code_bitmap &&
bellardd720b932004-04-25 17:57:43 +00001131 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1132 is_cpu_write_access) {
bellard9fa3e852004-01-04 18:06:42 +00001133 /* build code bitmap */
1134 build_page_bitmap(p);
1135 }
1136
1137 /* we remove all the TBs in the range [start, end[ */
1138 /* XXX: see if in some cases it could be faster to invalidate all the code */
1139 tb = p->first_tb;
1140 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001141 n = (uintptr_t)tb & 3;
1142 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellard9fa3e852004-01-04 18:06:42 +00001143 tb_next = tb->page_next[n];
1144 /* NOTE: this is subtle as a TB may span two physical pages */
1145 if (n == 0) {
1146 /* NOTE: tb_end may be after the end of the page, but
1147 it is not a problem */
1148 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1149 tb_end = tb_start + tb->size;
1150 } else {
1151 tb_start = tb->page_addr[1];
1152 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1153 }
1154 if (!(tb_end <= start || tb_start >= end)) {
bellardd720b932004-04-25 17:57:43 +00001155#ifdef TARGET_HAS_PRECISE_SMC
1156 if (current_tb_not_found) {
1157 current_tb_not_found = 0;
1158 current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001159 if (env->mem_io_pc) {
bellardd720b932004-04-25 17:57:43 +00001160 /* now we have a real cpu fault */
pbrook2e70f6e2008-06-29 01:03:05 +00001161 current_tb = tb_find_pc(env->mem_io_pc);
bellardd720b932004-04-25 17:57:43 +00001162 }
1163 }
1164 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001165 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001166 /* If we are modifying the current TB, we must stop
1167 its execution. We could be more precise by checking
1168 that the modification is after the current PC, but it
1169 would require a specialized function to partially
1170 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001171
bellardd720b932004-04-25 17:57:43 +00001172 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001173 cpu_restore_state(current_tb, env, env->mem_io_pc);
aliguori6b917542008-11-18 19:46:41 +00001174 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1175 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001176 }
1177#endif /* TARGET_HAS_PRECISE_SMC */
bellard6f5a9f72005-11-26 20:12:28 +00001178 /* we need to do that to handle the case where a signal
1179 occurs while doing tb_phys_invalidate() */
1180 saved_tb = NULL;
1181 if (env) {
1182 saved_tb = env->current_tb;
1183 env->current_tb = NULL;
1184 }
bellard9fa3e852004-01-04 18:06:42 +00001185 tb_phys_invalidate(tb, -1);
bellard6f5a9f72005-11-26 20:12:28 +00001186 if (env) {
1187 env->current_tb = saved_tb;
1188 if (env->interrupt_request && env->current_tb)
1189 cpu_interrupt(env, env->interrupt_request);
1190 }
bellard9fa3e852004-01-04 18:06:42 +00001191 }
1192 tb = tb_next;
1193 }
1194#if !defined(CONFIG_USER_ONLY)
1195 /* if no code remaining, no need to continue to use slow writes */
1196 if (!p->first_tb) {
1197 invalidate_page_bitmap(p);
bellardd720b932004-04-25 17:57:43 +00001198 if (is_cpu_write_access) {
pbrook2e70f6e2008-06-29 01:03:05 +00001199 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
bellardd720b932004-04-25 17:57:43 +00001200 }
1201 }
1202#endif
1203#ifdef TARGET_HAS_PRECISE_SMC
1204 if (current_tb_modified) {
1205 /* we generate a block containing just the instruction
1206 modifying the memory. It will ensure that it cannot modify
1207 itself */
bellardea1c1802004-06-14 18:56:36 +00001208 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001209 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001210 cpu_resume_from_signal(env, NULL);
bellard9fa3e852004-01-04 18:06:42 +00001211 }
1212#endif
1213}
1214
1215/* len must be <= 8 and start must be a multiple of len */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001216static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
bellard9fa3e852004-01-04 18:06:42 +00001217{
1218 PageDesc *p;
1219 int offset, b;
bellard59817cc2004-02-16 22:01:13 +00001220#if 0
bellarda4193c82004-06-03 14:01:43 +00001221 if (1) {
aliguori93fcfe32009-01-15 22:34:14 +00001222 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1223 cpu_single_env->mem_io_vaddr, len,
1224 cpu_single_env->eip,
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001225 cpu_single_env->eip +
1226 (intptr_t)cpu_single_env->segs[R_CS].base);
bellard59817cc2004-02-16 22:01:13 +00001227 }
1228#endif
bellard9fa3e852004-01-04 18:06:42 +00001229 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001230 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001231 return;
1232 if (p->code_bitmap) {
1233 offset = start & ~TARGET_PAGE_MASK;
1234 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1235 if (b & ((1 << len) - 1))
1236 goto do_invalidate;
1237 } else {
1238 do_invalidate:
bellardd720b932004-04-25 17:57:43 +00001239 tb_invalidate_phys_page_range(start, start + len, 1);
bellard9fa3e852004-01-04 18:06:42 +00001240 }
1241}
1242
bellard9fa3e852004-01-04 18:06:42 +00001243#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001244static void tb_invalidate_phys_page(tb_page_addr_t addr,
Blue Swirl20503962012-04-09 14:20:20 +00001245 uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001246{
aliguori6b917542008-11-18 19:46:41 +00001247 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001248 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001249 int n;
bellardd720b932004-04-25 17:57:43 +00001250#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001251 TranslationBlock *current_tb = NULL;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001252 CPUArchState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001253 int current_tb_modified = 0;
1254 target_ulong current_pc = 0;
1255 target_ulong current_cs_base = 0;
1256 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001257#endif
bellard9fa3e852004-01-04 18:06:42 +00001258
1259 addr &= TARGET_PAGE_MASK;
1260 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001261 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001262 return;
1263 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001264#ifdef TARGET_HAS_PRECISE_SMC
1265 if (tb && pc != 0) {
1266 current_tb = tb_find_pc(pc);
1267 }
1268#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001269 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001270 n = (uintptr_t)tb & 3;
1271 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001272#ifdef TARGET_HAS_PRECISE_SMC
1273 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001274 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001275 /* If we are modifying the current TB, we must stop
1276 its execution. We could be more precise by checking
1277 that the modification is after the current PC, but it
1278 would require a specialized function to partially
1279 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001280
bellardd720b932004-04-25 17:57:43 +00001281 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001282 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001283 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1284 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001285 }
1286#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001287 tb_phys_invalidate(tb, addr);
1288 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001289 }
1290 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001291#ifdef TARGET_HAS_PRECISE_SMC
1292 if (current_tb_modified) {
        /* we generate a block containing just the instruction that
           modified the memory; this ensures that the new block cannot
           modify itself */
bellardea1c1802004-06-14 18:56:36 +00001296 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001297 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001298 cpu_resume_from_signal(env, puc);
1299 }
1300#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001301}
bellard9fa3e852004-01-04 18:06:42 +00001302#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001303
1304/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001305static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001306 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001307{
1308 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001309#ifndef CONFIG_USER_ONLY
1310 bool page_already_protected;
1311#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001312
bellard9fa3e852004-01-04 18:06:42 +00001313 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001314 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001315 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001316#ifndef CONFIG_USER_ONLY
1317 page_already_protected = p->first_tb != NULL;
1318#endif
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001319 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
bellard9fa3e852004-01-04 18:06:42 +00001320 invalidate_page_bitmap(p);
1321
bellard107db442004-06-22 18:48:46 +00001322#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001323
bellard9fa3e852004-01-04 18:06:42 +00001324#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001325 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001326 target_ulong addr;
1327 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001328 int prot;
1329
        /* force the host page to be non-writable (writes will trigger
           a page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001332 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001333 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001334 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1335 addr += TARGET_PAGE_SIZE) {
1336
1337 p2 = page_find (addr >> TARGET_PAGE_BITS);
1338 if (!p2)
1339 continue;
1340 prot |= p2->flags;
1341 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001342 }
ths5fafdf22007-09-16 21:08:06 +00001343 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001344 (prot & PAGE_BITS) & ~PAGE_WRITE);
1345#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001346 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001347 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001348#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001349 }
bellard9fa3e852004-01-04 18:06:42 +00001350#else
    /* if some code is already present, then the pages are already
       protected, so we only need to act when the first TB is
       allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001354 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001355 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001356 }
1357#endif
bellardd720b932004-04-25 17:57:43 +00001358
1359#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001360}
1361
bellard9fa3e852004-01-04 18:06:42 +00001362/* add a new TB and link it to the physical page tables. phys_page2 is
1363 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001364void tb_link_page(TranslationBlock *tb,
1365 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001366{
bellard9fa3e852004-01-04 18:06:42 +00001367 unsigned int h;
1368 TranslationBlock **ptb;
1369
pbrookc8a706f2008-06-02 16:16:42 +00001370 /* Grab the mmap lock to stop another thread invalidating this TB
1371 before we are done. */
1372 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001373 /* add in the physical hash table */
1374 h = tb_phys_hash_func(phys_pc);
1375 ptb = &tb_phys_hash[h];
1376 tb->phys_hash_next = *ptb;
1377 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001378
1379 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001380 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1381 if (phys_page2 != -1)
1382 tb_alloc_page(tb, 1, phys_page2);
1383 else
1384 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001385
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001386 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
bellardd4e81642003-05-25 16:46:15 +00001387 tb->jmp_next[0] = NULL;
1388 tb->jmp_next[1] = NULL;
1389
1390 /* init original jump addresses */
1391 if (tb->tb_next_offset[0] != 0xffff)
1392 tb_reset_jump(tb, 0);
1393 if (tb->tb_next_offset[1] != 0xffff)
1394 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001395
1396#ifdef DEBUG_TB_CHECK
1397 tb_page_check();
1398#endif
pbrookc8a706f2008-06-02 16:16:42 +00001399 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001400}
1401
bellarda513fe12003-05-27 23:29:48 +00001402/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1403 tb[1].tc_ptr. Return NULL if not found */
Stefan Weil6375e092012-04-06 22:26:15 +02001404TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
bellarda513fe12003-05-27 23:29:48 +00001405{
1406 int m_min, m_max, m;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001407 uintptr_t v;
bellarda513fe12003-05-27 23:29:48 +00001408 TranslationBlock *tb;
1409
1410 if (nb_tbs <= 0)
1411 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001412 if (tc_ptr < (uintptr_t)code_gen_buffer ||
1413 tc_ptr >= (uintptr_t)code_gen_ptr) {
bellarda513fe12003-05-27 23:29:48 +00001414 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001415 }
bellarda513fe12003-05-27 23:29:48 +00001416 /* binary search (cf Knuth) */
1417 m_min = 0;
1418 m_max = nb_tbs - 1;
1419 while (m_min <= m_max) {
1420 m = (m_min + m_max) >> 1;
1421 tb = &tbs[m];
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001422 v = (uintptr_t)tb->tc_ptr;
bellarda513fe12003-05-27 23:29:48 +00001423 if (v == tc_ptr)
1424 return tb;
1425 else if (tc_ptr < v) {
1426 m_max = m - 1;
1427 } else {
1428 m_min = m + 1;
1429 }
ths5fafdf22007-09-16 21:08:06 +00001430 }
bellarda513fe12003-05-27 23:29:48 +00001431 return &tbs[m_max];
1432}
bellard75012672003-06-21 13:11:07 +00001433
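#if 0
/* Minimal usage sketch (not compiled in): map a host PC taken from a
   SIGSEGV handler back to the guest state that produced it, the way
   the SMC handling above does.  env and host_pc are assumed to come
   from the signal context. */
static void example_restore_guest_state(CPUArchState *env, uintptr_t host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);

    if (tb) {
        /* recompute the guest CPU state at the faulting instruction */
        cpu_restore_state(tb, env, host_pc);
    }
}
#endif
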
bellardea041c02003-06-25 16:16:50 +00001434static void tb_reset_jump_recursive(TranslationBlock *tb);
1435
1436static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1437{
1438 TranslationBlock *tb1, *tb_next, **ptb;
1439 unsigned int n1;
1440
1441 tb1 = tb->jmp_next[n];
1442 if (tb1 != NULL) {
1443 /* find head of list */
1444 for(;;) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001445 n1 = (uintptr_t)tb1 & 3;
1446 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001447 if (n1 == 2)
1448 break;
1449 tb1 = tb1->jmp_next[n1];
1450 }
        /* we are now sure that tb jumps to tb1 */
1452 tb_next = tb1;
1453
1454 /* remove tb from the jmp_first list */
1455 ptb = &tb_next->jmp_first;
1456 for(;;) {
1457 tb1 = *ptb;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001458 n1 = (uintptr_t)tb1 & 3;
1459 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001460 if (n1 == n && tb1 == tb)
1461 break;
1462 ptb = &tb1->jmp_next[n1];
1463 }
1464 *ptb = tb->jmp_next[n];
1465 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001466
bellardea041c02003-06-25 16:16:50 +00001467 /* suppress the jump to next tb in generated code */
1468 tb_reset_jump(tb, n);
1469
        /* suppress jumps in the tb we could have jumped to */
bellardea041c02003-06-25 16:16:50 +00001471 tb_reset_jump_recursive(tb_next);
1472 }
1473}
1474
1475static void tb_reset_jump_recursive(TranslationBlock *tb)
1476{
1477 tb_reset_jump_recursive2(tb, 0);
1478 tb_reset_jump_recursive2(tb, 1);
1479}
1480
bellard1fddef42005-04-17 19:16:13 +00001481#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001482#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001483static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
Paul Brook94df27f2010-02-28 23:47:45 +00001484{
1485 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1486}
1487#else
Max Filippov1e7855a2012-04-10 02:48:17 +04001488void tb_invalidate_phys_addr(target_phys_addr_t addr)
bellardd720b932004-04-25 17:57:43 +00001489{
Anthony Liguoric227f092009-10-01 16:12:16 -05001490 ram_addr_t ram_addr;
Avi Kivityf3705d52012-03-08 16:16:34 +02001491 MemoryRegionSection *section;
bellardd720b932004-04-25 17:57:43 +00001492
Avi Kivity06ef3522012-02-13 16:11:22 +02001493 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf3705d52012-03-08 16:16:34 +02001494 if (!(memory_region_is_ram(section->mr)
1495 || (section->mr->rom_device && section->mr->readable))) {
Avi Kivity06ef3522012-02-13 16:11:22 +02001496 return;
1497 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001498 ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001499 + memory_region_section_addr(section, addr);
pbrook706cd4b2006-04-08 17:36:21 +00001500 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001501}
Max Filippov1e7855a2012-04-10 02:48:17 +04001502
1503static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
1504{
Max Filippov9d70c4b2012-05-27 20:21:08 +04001505 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
1506 (pc & ~TARGET_PAGE_MASK));
Max Filippov1e7855a2012-04-10 02:48:17 +04001507}
bellardc27004e2005-01-03 23:35:10 +00001508#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001509#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001510
Paul Brookc527ee82010-03-01 03:31:14 +00001511#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001512void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
1515}
1516
Andreas Färber9349b4f2012-03-14 01:38:32 +01001517int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
Paul Brookc527ee82010-03-01 03:31:14 +00001518 int flags, CPUWatchpoint **watchpoint)
1519{
1520 return -ENOSYS;
1521}
1522#else
pbrook6658ffb2007-03-16 23:58:11 +00001523/* Add a watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001524int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001525 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001526{
aliguorib4051332008-11-18 20:14:20 +00001527 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001528 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001529
aliguorib4051332008-11-18 20:14:20 +00001530 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
Max Filippov0dc23822012-01-29 03:15:23 +04001531 if ((len & (len - 1)) || (addr & ~len_mask) ||
1532 len == 0 || len > TARGET_PAGE_SIZE) {
aliguorib4051332008-11-18 20:14:20 +00001533 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1534 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1535 return -EINVAL;
1536 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001537 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001538
aliguoria1d1bb32008-11-18 20:07:32 +00001539 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001540 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001541 wp->flags = flags;
1542
aliguori2dc9f412008-11-18 20:56:59 +00001543 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001544 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001545 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001546 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001547 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001548
pbrook6658ffb2007-03-16 23:58:11 +00001549 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001550
1551 if (watchpoint)
1552 *watchpoint = wp;
1553 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001554}
1555
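#if 0
/* Minimal usage sketch (not compiled in): watch four bytes for guest
   writes.  The address 0x1000 and the flag combination BP_MEM_WRITE |
   BP_GDB are illustrative; len must be a power of 2 and addr aligned
   to it, as the sanity check above enforces. */
static int example_watch(CPUArchState *env)
{
    CPUWatchpoint *wp;
    int ret;

    ret = cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE | BP_GDB, &wp);
    if (ret < 0) {
        return ret;
    }
    /* ... run the guest; a triggered watchpoint sets BP_WATCHPOINT_HIT ... */
    return cpu_watchpoint_remove(env, 0x1000, 4, BP_MEM_WRITE | BP_GDB);
}
#endif
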
aliguoria1d1bb32008-11-18 20:07:32 +00001556/* Remove a specific watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001557int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001558 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001559{
aliguorib4051332008-11-18 20:14:20 +00001560 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001561 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001562
Blue Swirl72cf2d42009-09-12 07:36:22 +00001563 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001564 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001565 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001566 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001567 return 0;
1568 }
1569 }
aliguoria1d1bb32008-11-18 20:07:32 +00001570 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001571}
1572
aliguoria1d1bb32008-11-18 20:07:32 +00001573/* Remove a specific watchpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001574void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
aliguoria1d1bb32008-11-18 20:07:32 +00001575{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001576 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001577
aliguoria1d1bb32008-11-18 20:07:32 +00001578 tlb_flush_page(env, watchpoint->vaddr);
1579
Anthony Liguori7267c092011-08-20 22:09:37 -05001580 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001581}
1582
aliguoria1d1bb32008-11-18 20:07:32 +00001583/* Remove all matching watchpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001584void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001585{
aliguoric0ce9982008-11-25 22:13:57 +00001586 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001587
Blue Swirl72cf2d42009-09-12 07:36:22 +00001588 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001589 if (wp->flags & mask)
1590 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001591 }
aliguoria1d1bb32008-11-18 20:07:32 +00001592}
Paul Brookc527ee82010-03-01 03:31:14 +00001593#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001594
1595/* Add a breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001596int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +00001597 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001598{
bellard1fddef42005-04-17 19:16:13 +00001599#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001600 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001601
Anthony Liguori7267c092011-08-20 22:09:37 -05001602 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001603
1604 bp->pc = pc;
1605 bp->flags = flags;
1606
aliguori2dc9f412008-11-18 20:56:59 +00001607 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001608 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001609 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001610 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001611 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001612
1613 breakpoint_invalidate(env, pc);
1614
1615 if (breakpoint)
1616 *breakpoint = bp;
1617 return 0;
1618#else
1619 return -ENOSYS;
1620#endif
1621}
1622
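#if 0
/* Minimal usage sketch (not compiled in): plant and later drop a
   gdbstub breakpoint.  BP_GDB keeps it at the head of the list, as
   the insertion code above shows; pc is illustrative. */
static int example_break(CPUArchState *env, target_ulong pc)
{
    CPUBreakpoint *bp;
    int ret = cpu_breakpoint_insert(env, pc, BP_GDB, &bp);

    if (ret == 0) {
        /* ... resume the guest; hitting pc raises EXCP_DEBUG ... */
        cpu_breakpoint_remove_by_ref(env, bp);
    }
    return ret;
}
#endif
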
1623/* Remove a specific breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001624int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +00001625{
1626#if defined(TARGET_HAS_ICE)
1627 CPUBreakpoint *bp;
1628
Blue Swirl72cf2d42009-09-12 07:36:22 +00001629 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001630 if (bp->pc == pc && bp->flags == flags) {
1631 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001632 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001633 }
bellard4c3a88a2003-07-26 12:06:08 +00001634 }
aliguoria1d1bb32008-11-18 20:07:32 +00001635 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001636#else
aliguoria1d1bb32008-11-18 20:07:32 +00001637 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001638#endif
1639}
1640
aliguoria1d1bb32008-11-18 20:07:32 +00001641/* Remove a specific breakpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001642void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001643{
bellard1fddef42005-04-17 19:16:13 +00001644#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001645 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001646
aliguoria1d1bb32008-11-18 20:07:32 +00001647 breakpoint_invalidate(env, breakpoint->pc);
1648
Anthony Liguori7267c092011-08-20 22:09:37 -05001649 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001650#endif
1651}
1652
1653/* Remove all matching breakpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001654void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001655{
1656#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001657 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001658
Blue Swirl72cf2d42009-09-12 07:36:22 +00001659 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001660 if (bp->flags & mask)
1661 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001662 }
bellard4c3a88a2003-07-26 12:06:08 +00001663#endif
1664}
1665
/* enable or disable single-step mode. EXCP_DEBUG is returned by the
1667 CPU loop after each instruction */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001668void cpu_single_step(CPUArchState *env, int enabled)
bellardc33a3462003-07-29 20:50:33 +00001669{
bellard1fddef42005-04-17 19:16:13 +00001670#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001671 if (env->singlestep_enabled != enabled) {
1672 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001673 if (kvm_enabled())
1674 kvm_update_guest_debug(env, 0);
1675 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001676 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001677 /* XXX: only flush what is necessary */
1678 tb_flush(env);
1679 }
bellardc33a3462003-07-29 20:50:33 +00001680 }
1681#endif
1682}
1683
Andreas Färber9349b4f2012-03-14 01:38:32 +01001684static void cpu_unlink_tb(CPUArchState *env)
bellardea041c02003-06-25 16:16:50 +00001685{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For
       userspace emulation this often isn't as bad as it sounds, since
       signals are mostly used to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001690 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001691 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001692
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001693 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001694 tb = env->current_tb;
1695 /* if the cpu is currently executing code, we must unlink it and
1696 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001697 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001698 env->current_tb = NULL;
1699 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001700 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001701 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001702}
1703
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001704#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001705/* mask must never be zero, except for A20 change call */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001706static void tcg_handle_interrupt(CPUArchState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001707{
1708 int old_mask;
1709
1710 old_mask = env->interrupt_request;
1711 env->interrupt_request |= mask;
1712
aliguori8edac962009-04-24 18:03:45 +00001713 /*
1714 * If called from iothread context, wake the target cpu in
     * case it's halted.
1716 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001717 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001718 qemu_cpu_kick(env);
1719 return;
1720 }
aliguori8edac962009-04-24 18:03:45 +00001721
pbrook2e70f6e2008-06-29 01:03:05 +00001722 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001723 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001724 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001725 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001726 cpu_abort(env, "Raised interrupt while not in I/O function");
1727 }
pbrook2e70f6e2008-06-29 01:03:05 +00001728 } else {
aurel323098dba2009-03-07 21:28:24 +00001729 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001730 }
1731}
1732
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001733CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1734
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001735#else /* CONFIG_USER_ONLY */
1736
Andreas Färber9349b4f2012-03-14 01:38:32 +01001737void cpu_interrupt(CPUArchState *env, int mask)
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001738{
1739 env->interrupt_request |= mask;
1740 cpu_unlink_tb(env);
1741}
1742#endif /* CONFIG_USER_ONLY */
1743
Andreas Färber9349b4f2012-03-14 01:38:32 +01001744void cpu_reset_interrupt(CPUArchState *env, int mask)
bellardb54ad042004-05-20 13:42:52 +00001745{
1746 env->interrupt_request &= ~mask;
1747}
1748
Andreas Färber9349b4f2012-03-14 01:38:32 +01001749void cpu_exit(CPUArchState *env)
aurel323098dba2009-03-07 21:28:24 +00001750{
1751 env->exit_request = 1;
1752 cpu_unlink_tb(env);
1753}
1754
Andreas Färber9349b4f2012-03-14 01:38:32 +01001755void cpu_abort(CPUArchState *env, const char *fmt, ...)
bellard75012672003-06-21 13:11:07 +00001756{
1757 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001758 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001759
1760 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001761 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001762 fprintf(stderr, "qemu: fatal: ");
1763 vfprintf(stderr, fmt, ap);
1764 fprintf(stderr, "\n");
Peter Maydell6fd2a022012-10-05 15:04:43 +01001765 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori93fcfe32009-01-15 22:34:14 +00001766 if (qemu_log_enabled()) {
1767 qemu_log("qemu: fatal: ");
1768 qemu_log_vprintf(fmt, ap2);
1769 qemu_log("\n");
Peter Maydell6fd2a022012-10-05 15:04:43 +01001770 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori31b1a7b2009-01-15 22:35:09 +00001771 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001772 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001773 }
pbrook493ae1f2007-11-23 16:53:59 +00001774 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001775 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001776#if defined(CONFIG_USER_ONLY)
1777 {
1778 struct sigaction act;
1779 sigfillset(&act.sa_mask);
1780 act.sa_handler = SIG_DFL;
1781 sigaction(SIGABRT, &act, NULL);
1782 }
1783#endif
bellard75012672003-06-21 13:11:07 +00001784 abort();
1785}
1786
Andreas Färber9349b4f2012-03-14 01:38:32 +01001787CPUArchState *cpu_copy(CPUArchState *env)
thsc5be9f02007-02-28 20:20:53 +00001788{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001789 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1790 CPUArchState *next_cpu = new_env->next_cpu;
thsc5be9f02007-02-28 20:20:53 +00001791 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001792#if defined(TARGET_HAS_ICE)
1793 CPUBreakpoint *bp;
1794 CPUWatchpoint *wp;
1795#endif
1796
Andreas Färber9349b4f2012-03-14 01:38:32 +01001797 memcpy(new_env, env, sizeof(CPUArchState));
aliguori5a38f082009-01-15 20:16:51 +00001798
1799 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001800 new_env->next_cpu = next_cpu;
1801 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001802
1803 /* Clone all break/watchpoints.
1804 Note: Once we support ptrace with hw-debug register access, make sure
1805 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001806 QTAILQ_INIT(&env->breakpoints);
1807 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001808#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001809 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001810 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1811 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001812 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001813 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1814 wp->flags, NULL);
1815 }
1816#endif
1817
thsc5be9f02007-02-28 20:20:53 +00001818 return new_env;
1819}
1820
bellard01243112004-01-04 15:48:17 +00001821#if !defined(CONFIG_USER_ONLY)
Blue Swirl0cac1b62012-04-09 16:50:52 +00001822void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
edgar_igl5c751e92008-05-06 08:44:21 +00001823{
1824 unsigned int i;
1825
    /* Discard jump cache entries for any tb which might overlap the
       flushed page. */
1828 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1829 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001830 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001831
1832 i = tb_jmp_cache_hash_page(addr);
1833 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001834 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001835}
1836
Juan Quintelad24981d2012-05-22 00:42:40 +02001837static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
1838 uintptr_t length)
bellard1ccde1c2004-02-06 19:46:14 +00001839{
Juan Quintelad24981d2012-05-22 00:42:40 +02001840 uintptr_t start1;
bellardf23db162005-08-21 19:12:28 +00001841
bellard1ccde1c2004-02-06 19:46:14 +00001842 /* we modify the TLB cache so that the dirty bit will be set again
1843 when accessing the range */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001844 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02001845 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00001846 address comparisons below. */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001847 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00001848 != (end - 1) - start) {
1849 abort();
1850 }
Blue Swirle5548612012-04-21 13:08:33 +00001851 cpu_tlb_reset_dirty_all(start1, length);
Juan Quintelad24981d2012-05-22 00:42:40 +02001852
1853}
1854
1855/* Note: start and end must be within the same ram block. */
1856void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1857 int dirty_flags)
1858{
1859 uintptr_t length;
1860
1861 start &= TARGET_PAGE_MASK;
1862 end = TARGET_PAGE_ALIGN(end);
1863
1864 length = end - start;
1865 if (length == 0)
1866 return;
1867 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1868
1869 if (tcg_enabled()) {
1870 tlb_reset_dirty_range_all(start, end, length);
1871 }
bellard1ccde1c2004-02-06 19:46:14 +00001872}
1873
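#if 0
/* Minimal usage sketch (not compiled in): how a VGA-style device
   rescans one framebuffer page and then clears its dirty bit.
   cpu_physical_memory_get_dirty() and VGA_DIRTY_FLAG are assumed to
   come from the dirty-bitmap helpers; addr is an illustrative
   page-aligned ram_addr_t inside the framebuffer. */
static void example_vga_update_page(ram_addr_t addr)
{
    if (cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
                                      VGA_DIRTY_FLAG)) {
        /* ... redraw the scanlines backed by this page ... */
        cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
                                        VGA_DIRTY_FLAG);
    }
}
#endif
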
aliguori74576192008-10-06 14:02:03 +00001874int cpu_physical_memory_set_dirty_tracking(int enable)
1875{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001876 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00001877 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001878 return ret;
aliguori74576192008-10-06 14:02:03 +00001879}
1880
Blue Swirle5548612012-04-21 13:08:33 +00001881target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
1882 MemoryRegionSection *section,
1883 target_ulong vaddr,
1884 target_phys_addr_t paddr,
1885 int prot,
1886 target_ulong *address)
1887{
1888 target_phys_addr_t iotlb;
1889 CPUWatchpoint *wp;
1890
Blue Swirlcc5bea62012-04-14 14:56:48 +00001891 if (memory_region_is_ram(section->mr)) {
Blue Swirle5548612012-04-21 13:08:33 +00001892 /* Normal RAM. */
1893 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001894 + memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001895 if (!section->readonly) {
1896 iotlb |= phys_section_notdirty;
1897 } else {
1898 iotlb |= phys_section_rom;
1899 }
1900 } else {
1901 /* IO handlers are currently passed a physical address.
1902 It would be nice to pass an offset from the base address
1903 of that region. This would avoid having to special case RAM,
1904 and avoid full address decoding in every device.
1905 We can't use the high bits of pd for this because
1906 IO_MEM_ROMD uses these as a ram address. */
1907 iotlb = section - phys_sections;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001908 iotlb += memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001909 }
1910
1911 /* Make accesses to pages with watchpoints go via the
1912 watchpoint trap routines. */
1913 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1914 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1915 /* Avoid trapping reads of pages with a write breakpoint. */
1916 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1917 iotlb = phys_section_watch + paddr;
1918 *address |= TLB_MMIO;
1919 break;
1920 }
1921 }
1922 }
1923
1924 return iotlb;
1925}
1926
bellard01243112004-01-04 15:48:17 +00001927#else
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03001928/*
1929 * Walks guest process memory "regions" one by one
1930 * and calls callback function 'fn' for each region.
1931 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001932
1933struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00001934{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001935 walk_memory_regions_fn fn;
1936 void *priv;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001937 uintptr_t start;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001938 int prot;
1939};
bellard9fa3e852004-01-04 18:06:42 +00001940
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001941static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00001942 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001943{
1944 if (data->start != -1ul) {
1945 int rc = data->fn(data->priv, data->start, end, data->prot);
1946 if (rc != 0) {
1947 return rc;
bellard9fa3e852004-01-04 18:06:42 +00001948 }
bellard33417e72003-08-10 21:47:01 +00001949 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001950
1951 data->start = (new_prot ? end : -1ul);
1952 data->prot = new_prot;
1953
1954 return 0;
1955}
1956
1957static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00001958 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001959{
Paul Brookb480d9b2010-03-12 23:23:29 +00001960 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001961 int i, rc;
1962
1963 if (*lp == NULL) {
1964 return walk_memory_regions_end(data, base, 0);
1965 }
1966
1967 if (level == 0) {
1968 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001969 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001970 int prot = pd[i].flags;
1971
1972 pa = base | (i << TARGET_PAGE_BITS);
1973 if (prot != data->prot) {
1974 rc = walk_memory_regions_end(data, pa, prot);
1975 if (rc != 0) {
1976 return rc;
1977 }
1978 }
1979 }
1980 } else {
1981 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001982 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00001983 pa = base | ((abi_ulong)i <<
1984 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001985 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1986 if (rc != 0) {
1987 return rc;
1988 }
1989 }
1990 }
1991
1992 return 0;
1993}
1994
1995int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1996{
1997 struct walk_memory_regions_data data;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001998 uintptr_t i;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001999
2000 data.fn = fn;
2001 data.priv = priv;
2002 data.start = -1ul;
2003 data.prot = 0;
2004
2005 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002006 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002007 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2008 if (rc != 0) {
2009 return rc;
2010 }
2011 }
2012
2013 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002014}
2015
Paul Brookb480d9b2010-03-12 23:23:29 +00002016static int dump_region(void *priv, abi_ulong start,
2017 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002018{
2019 FILE *f = (FILE *)priv;
2020
Paul Brookb480d9b2010-03-12 23:23:29 +00002021 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2022 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002023 start, end, end - start,
2024 ((prot & PAGE_READ) ? 'r' : '-'),
2025 ((prot & PAGE_WRITE) ? 'w' : '-'),
2026 ((prot & PAGE_EXEC) ? 'x' : '-'));
2027
2028 return (0);
2029}
2030
2031/* dump memory mappings */
2032void page_dump(FILE *f)
2033{
2034 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2035 "start", "end", "size", "prot");
2036 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002037}
2038
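#if 0
/* Minimal usage sketch (not compiled in): count executable bytes with
   a custom callback, in the same style as dump_region() above. */
static int count_exec(void *priv, abi_ulong start, abi_ulong end,
                      unsigned long prot)
{
    abi_ulong *total = priv;

    if (prot & PAGE_EXEC) {
        *total += end - start;
    }
    return 0;
}

static abi_ulong example_count_exec_bytes(void)
{
    abi_ulong total = 0;

    walk_memory_regions(&total, count_exec);
    return total;
}
#endif
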
pbrook53a59602006-03-25 19:31:22 +00002039int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002040{
bellard9fa3e852004-01-04 18:06:42 +00002041 PageDesc *p;
2042
2043 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002044 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002045 return 0;
2046 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002047}
2048
Richard Henderson376a7902010-03-10 15:57:04 -08002049/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is set automatically depending
2051 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002052void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002053{
Richard Henderson376a7902010-03-10 15:57:04 -08002054 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002055
Richard Henderson376a7902010-03-10 15:57:04 -08002056 /* This function should never be called with addresses outside the
2057 guest address space. If this assert fires, it probably indicates
2058 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002059#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2060 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002061#endif
2062 assert(start < end);
2063
bellard9fa3e852004-01-04 18:06:42 +00002064 start = start & TARGET_PAGE_MASK;
2065 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002066
2067 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002068 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002069 }
2070
2071 for (addr = start, len = end - start;
2072 len != 0;
2073 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2074 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2075
2076 /* If the write protection bit is set, then we invalidate
2077 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002078 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002079 (flags & PAGE_WRITE) &&
2080 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002081 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002082 }
2083 p->flags = flags;
2084 }
bellard9fa3e852004-01-04 18:06:42 +00002085}
2086
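#if 0
/* Minimal usage sketch (not compiled in): what the user-mode mmap
   emulation is expected to do after creating a read-write anonymous
   mapping.  start and len are illustrative, and the mmap_lock must be
   held as noted above. */
static void example_note_mapping(target_ulong start, target_ulong len)
{
    page_set_flags(start, start + len, PAGE_VALID | PAGE_READ | PAGE_WRITE);
}
#endif
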
ths3d97b402007-11-02 19:02:07 +00002087int page_check_range(target_ulong start, target_ulong len, int flags)
2088{
2089 PageDesc *p;
2090 target_ulong end;
2091 target_ulong addr;
2092
Richard Henderson376a7902010-03-10 15:57:04 -08002093 /* This function should never be called with addresses outside the
2094 guest address space. If this assert fires, it probably indicates
2095 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002096#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2097 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002098#endif
2099
Richard Henderson3e0650a2010-03-29 10:54:42 -07002100 if (len == 0) {
2101 return 0;
2102 }
Richard Henderson376a7902010-03-10 15:57:04 -08002103 if (start + len - 1 < start) {
2104 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002105 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002106 }
balrog55f280c2008-10-28 10:24:11 +00002107
    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
2109 start = start & TARGET_PAGE_MASK;
2110
Richard Henderson376a7902010-03-10 15:57:04 -08002111 for (addr = start, len = end - start;
2112 len != 0;
2113 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002114 p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;
2119
bellarddae32702007-11-14 10:51:00 +00002120 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002121 return -1;
bellarddae32702007-11-14 10:51:00 +00002122 if (flags & PAGE_WRITE) {
2123 if (!(p->flags & PAGE_WRITE_ORG))
2124 return -1;
            /* unprotect the page if it was made read-only because it
               contains translated code */
2127 if (!(p->flags & PAGE_WRITE)) {
2128 if (!page_unprotect(addr, 0, NULL))
2129 return -1;
2130 }
2131 return 0;
2132 }
ths3d97b402007-11-02 19:02:07 +00002133 }
2134 return 0;
2135}
2136
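#if 0
/* Minimal usage sketch (not compiled in): validate a guest buffer
   before syscall emulation writes to it directly through g2h().
   guest_addr and size are illustrative. */
static int example_clear_guest_buffer(target_ulong guest_addr,
                                      target_ulong size)
{
    if (page_check_range(guest_addr, size, PAGE_WRITE) < 0) {
        return -EFAULT;
    }
    memset(g2h(guest_addr), 0, size); /* safe: pages valid and writable */
    return 0;
}
#endif
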
bellard9fa3e852004-01-04 18:06:42 +00002137/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002138 page. Return TRUE if the fault was successfully handled. */
Stefan Weil6375e092012-04-06 22:26:15 +02002139int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002140{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002141 unsigned int prot;
2142 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002143 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002144
pbrookc8a706f2008-06-02 16:16:42 +00002145 /* Technically this isn't safe inside a signal handler. However we
2146 know this only ever happens in a synchronous SEGV handler, so in
2147 practice it seems to be ok. */
2148 mmap_lock();
2149
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002150 p = page_find(address >> TARGET_PAGE_BITS);
2151 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002152 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002153 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002154 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002155
bellard9fa3e852004-01-04 18:06:42 +00002156 /* if the page was really writable, then we change its
2157 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002158 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2159 host_start = address & qemu_host_page_mask;
2160 host_end = host_start + qemu_host_page_size;
2161
2162 prot = 0;
2163 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2164 p = page_find(addr >> TARGET_PAGE_BITS);
2165 p->flags |= PAGE_WRITE;
2166 prot |= p->flags;
2167
bellard9fa3e852004-01-04 18:06:42 +00002168 /* and since the content will be modified, we must invalidate
2169 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002170 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002171#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002172 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002173#endif
bellard9fa3e852004-01-04 18:06:42 +00002174 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002175 mprotect((void *)g2h(host_start), qemu_host_page_size,
2176 prot & PAGE_BITS);
2177
2178 mmap_unlock();
2179 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002180 }
pbrookc8a706f2008-06-02 16:16:42 +00002181 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002182 return 0;
2183}
bellard9fa3e852004-01-04 18:06:42 +00002184#endif /* defined(CONFIG_USER_ONLY) */
2185
pbrooke2eef172008-06-08 01:09:01 +00002186#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002187
Paul Brookc04b2b72010-03-01 03:31:14 +00002188#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2189typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002190 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002191 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002192 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002193} subpage_t;
2194
Anthony Liguoric227f092009-10-01 16:12:16 -05002195static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002196 uint16_t section);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002197static subpage_t *subpage_init(target_phys_addr_t base);
Avi Kivity5312bd82012-02-12 18:32:55 +02002198static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002199{
Avi Kivity5312bd82012-02-12 18:32:55 +02002200 MemoryRegionSection *section = &phys_sections[section_index];
2201 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002202
2203 if (mr->subpage) {
2204 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2205 memory_region_destroy(&subpage->iomem);
2206 g_free(subpage);
2207 }
2208}
2209
Avi Kivity4346ae32012-02-10 17:00:01 +02002210static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002211{
2212 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002213 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002214
Avi Kivityc19e8802012-02-13 20:25:31 +02002215 if (lp->ptr == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002216 return;
2217 }
2218
Avi Kivityc19e8802012-02-13 20:25:31 +02002219 p = phys_map_nodes[lp->ptr];
Avi Kivity4346ae32012-02-10 17:00:01 +02002220 for (i = 0; i < L2_SIZE; ++i) {
Avi Kivity07f07b32012-02-13 20:45:32 +02002221 if (!p[i].is_leaf) {
Avi Kivity54688b12012-02-09 17:34:32 +02002222 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002223 } else {
Avi Kivityc19e8802012-02-13 20:25:31 +02002224 destroy_page_desc(p[i].ptr);
Avi Kivity54688b12012-02-09 17:34:32 +02002225 }
Avi Kivity54688b12012-02-09 17:34:32 +02002226 }
Avi Kivity07f07b32012-02-13 20:45:32 +02002227 lp->is_leaf = 0;
Avi Kivityc19e8802012-02-13 20:25:31 +02002228 lp->ptr = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002229}
2230
2231static void destroy_all_mappings(void)
2232{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002233 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002234 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002235}
2236
Avi Kivity5312bd82012-02-12 18:32:55 +02002237static uint16_t phys_section_add(MemoryRegionSection *section)
2238{
2239 if (phys_sections_nb == phys_sections_nb_alloc) {
2240 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2241 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2242 phys_sections_nb_alloc);
2243 }
2244 phys_sections[phys_sections_nb] = *section;
2245 return phys_sections_nb++;
2246}
2247
2248static void phys_sections_clear(void)
2249{
2250 phys_sections_nb = 0;
2251}
2252
Avi Kivity0f0cb162012-02-13 17:14:32 +02002253static void register_subpage(MemoryRegionSection *section)
2254{
2255 subpage_t *subpage;
2256 target_phys_addr_t base = section->offset_within_address_space
2257 & TARGET_PAGE_MASK;
Avi Kivityf3705d52012-03-08 16:16:34 +02002258 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002259 MemoryRegionSection subsection = {
2260 .offset_within_address_space = base,
2261 .size = TARGET_PAGE_SIZE,
2262 };
Avi Kivity0f0cb162012-02-13 17:14:32 +02002263 target_phys_addr_t start, end;
2264
Avi Kivityf3705d52012-03-08 16:16:34 +02002265 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002266
Avi Kivityf3705d52012-03-08 16:16:34 +02002267 if (!(existing->mr->subpage)) {
Avi Kivity0f0cb162012-02-13 17:14:32 +02002268 subpage = subpage_init(base);
2269 subsection.mr = &subpage->iomem;
Avi Kivity29990972012-02-13 20:21:20 +02002270 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2271 phys_section_add(&subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02002272 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002273 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002274 }
2275 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Tyler Halladb2a9b2012-07-25 18:45:03 -04002276 end = start + section->size - 1;
Avi Kivity0f0cb162012-02-13 17:14:32 +02002277 subpage_register(subpage, start, end, phys_section_add(section));
2278}
2279
2280
2281static void register_multipage(MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00002282{
Avi Kivitydd811242012-01-02 12:17:03 +02002283 target_phys_addr_t start_addr = section->offset_within_address_space;
2284 ram_addr_t size = section->size;
Avi Kivity29990972012-02-13 20:21:20 +02002285 target_phys_addr_t addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002286 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002287
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002288 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002289
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002290 addr = start_addr;
Avi Kivity29990972012-02-13 20:21:20 +02002291 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2292 section_index);
bellard33417e72003-08-10 21:47:01 +00002293}
2294
Avi Kivity0f0cb162012-02-13 17:14:32 +02002295void cpu_register_physical_memory_log(MemoryRegionSection *section,
2296 bool readonly)
2297{
2298 MemoryRegionSection now = *section, remain = *section;
2299
2300 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2301 || (now.size < TARGET_PAGE_SIZE)) {
2302 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2303 - now.offset_within_address_space,
2304 now.size);
2305 register_subpage(&now);
2306 remain.size -= now.size;
2307 remain.offset_within_address_space += now.size;
2308 remain.offset_within_region += now.size;
2309 }
Tyler Hall69b67642012-07-25 18:45:04 -04002310 while (remain.size >= TARGET_PAGE_SIZE) {
2311 now = remain;
2312 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
2313 now.size = TARGET_PAGE_SIZE;
2314 register_subpage(&now);
2315 } else {
2316 now.size &= TARGET_PAGE_MASK;
2317 register_multipage(&now);
2318 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02002319 remain.size -= now.size;
2320 remain.offset_within_address_space += now.size;
2321 remain.offset_within_region += now.size;
2322 }
2323 now = remain;
2324 if (now.size) {
2325 register_subpage(&now);
2326 }
2327}
2328
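/* Worked example for the split above (illustrative, with
   TARGET_PAGE_SIZE = 0x1000): a section with
   offset_within_address_space = 0x1800, offset_within_region = 0x800
   and size = 0x2800 becomes
     1. head [0x1800, 0x2000) -> register_subpage (unaligned start)
     2. body [0x2000, 0x4000) -> register_multipage (whole pages, the
        region offset is page aligned by then)
   leaving remain.size == 0; with size = 0x2900 a 0x100-byte tail would
   remain and go through the final register_subpage call. */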
2329
Anthony Liguoric227f092009-10-01 16:12:16 -05002330void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002331{
2332 if (kvm_enabled())
2333 kvm_coalesce_mmio_region(addr, size);
2334}
2335
Anthony Liguoric227f092009-10-01 16:12:16 -05002336void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002337{
2338 if (kvm_enabled())
2339 kvm_uncoalesce_mmio_region(addr, size);
2340}
2341
Sheng Yang62a27442010-01-26 19:21:16 +08002342void qemu_flush_coalesced_mmio_buffer(void)
2343{
2344 if (kvm_enabled())
2345 kvm_flush_coalesced_mmio_buffer();
2346}
2347
Marcelo Tosattic9027602010-03-01 20:25:08 -03002348#if defined(__linux__) && !defined(TARGET_S390X)
2349
2350#include <sys/vfs.h>
2351
2352#define HUGETLBFS_MAGIC 0x958458f6
2353
2354static long gethugepagesize(const char *path)
2355{
2356 struct statfs fs;
2357 int ret;
2358
2359 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002360 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002361 } while (ret != 0 && errno == EINTR);
2362
2363 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002364 perror(path);
2365 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002366 }
2367
2368 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002369 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002370
2371 return fs.f_bsize;
2372}
2373
Alex Williamson04b16652010-07-02 11:13:17 -06002374static void *file_ram_alloc(RAMBlock *block,
2375 ram_addr_t memory,
2376 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002377{
2378 char *filename;
2379 void *area;
2380 int fd;
2381#ifdef MAP_POPULATE
2382 int flags;
2383#endif
2384 unsigned long hpagesize;
2385
2386 hpagesize = gethugepagesize(path);
2387 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002388 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002389 }
2390
2391 if (memory < hpagesize) {
2392 return NULL;
2393 }
2394
2395 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2396 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2397 return NULL;
2398 }
2399
2400 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002401 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002402 }
2403
2404 fd = mkstemp(filename);
2405 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002406 perror("unable to create backing store for hugepages");
2407 free(filename);
2408 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002409 }
2410 unlink(filename);
2411 free(filename);
2412
2413 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2414
2415 /*
2416 * ftruncate is not supported by hugetlbfs in older
2417 * hosts, so don't bother bailing out on errors.
2418 * If anything goes wrong with it under other filesystems,
2419 * mmap will fail.
2420 */
2421 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002422 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002423
2424#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively allocate all physical pages
     * when MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
2427 * to sidestep this quirk.
2428 */
2429 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2430 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2431#else
2432 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2433#endif
2434 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002435 perror("file_ram_alloc: can't mmap RAM pages");
2436 close(fd);
2437 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002438 }
Alex Williamson04b16652010-07-02 11:13:17 -06002439 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002440 return area;
2441}
2442#endif
2443
Alex Williamsond17b5282010-06-25 11:08:38 -06002444static ram_addr_t find_ram_offset(ram_addr_t size)
2445{
Alex Williamson04b16652010-07-02 11:13:17 -06002446 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002447 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002448
2449 if (QLIST_EMPTY(&ram_list.blocks))
2450 return 0;
2451
2452 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002453 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002454
2455 end = block->offset + block->length;
2456
2457 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2458 if (next_block->offset >= end) {
2459 next = MIN(next, next_block->offset);
2460 }
2461 }
2462 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002463 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002464 mingap = next - end;
2465 }
2466 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002467
2468 if (offset == RAM_ADDR_MAX) {
2469 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2470 (uint64_t)size);
2471 abort();
2472 }
2473
Alex Williamson04b16652010-07-02 11:13:17 -06002474 return offset;
2475}
2476
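/* Worked example for find_ram_offset() (illustrative): with blocks
   [0, 0x100000) and [0x200000, 0x300000) and a request of 0x80000
   bytes, the candidate gaps are 0x100000 bytes after the first block
   and an unbounded one after the second; the loop keeps the smallest
   gap that fits, so the new block lands at offset 0x100000.  Best fit
   keeps the RAM address space from fragmenting. */
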
2477static ram_addr_t last_ram_offset(void)
2478{
Alex Williamsond17b5282010-06-25 11:08:38 -06002479 RAMBlock *block;
2480 ram_addr_t last = 0;
2481
2482 QLIST_FOREACH(block, &ram_list.blocks, next)
2483 last = MAX(last, block->offset + block->length);
2484
2485 return last;
2486}
2487
Jason Baronddb97f12012-08-02 15:44:16 -04002488static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
2489{
2490 int ret;
2491 QemuOpts *machine_opts;
2492
    /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
2494 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2495 if (machine_opts &&
2496 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
2497 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
2498 if (ret) {
2499 perror("qemu_madvise");
2500 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
2501 "but dump_guest_core=off specified\n");
2502 }
2503 }
2504}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
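
#if 0
/* Usage sketch (illustrative only, not part of the original file): device
 * models rarely call this directly; they normally go through
 * memory_region_init_ram(), which performs this allocation internally.
 * "my_mr" and the size are hypothetical. */
static ram_addr_t alloc_sketch(MemoryRegion *my_mr)
{
    /* 64 MB of guest RAM; the returned offset names the new block in
     * ram_list, and the host mapping is reachable via
     * qemu_get_ram_ptr(offset). */
    return qemu_ram_alloc(64 * 1024 * 1024, my_mr);
}
#endif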

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list. */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
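
#if 0
/* Sketch (illustrative, not part of the original file): the intended
 * fast-path use -- a device touching RAM it owns and staying inside the
 * block it allocated, as the comment above requires.  "base" and "len"
 * are hypothetical. */
static void fill_sketch(ram_addr_t base, ram_addr_t len)
{
    void *host = qemu_get_ram_ptr(base);

    memset(host, 0, len);    /* zero the device-owned block */
    qemu_put_ram_ptr(host);  /* currently only a trace hook, see below */
}
#endif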

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset. */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
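
#if 0
/* Sketch (illustrative): the two translations are inverses for any offset
 * inside a mapped block, which is what the softmmu fast path relies on. */
static void roundtrip_sketch(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);

    assert(qemu_ram_addr_from_host_nofail(host) == addr);
}
#endif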

static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
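
/* Illustrative walk-through (not from the original source): a 4-byte guest
 * store to a clean page takes notdirty_mem_write() exactly once -- it
 * invalidates any translated code on the page, performs the store, raises
 * every dirty bit except CODE_DIRTY_FLAG, and, once all bits are set,
 * re-arms the fast path by clearing the per-CPU notdirty TLB entry so
 * later stores go straight to RAM. */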

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
3113
Anthony Liguoric227f092009-10-01 16:12:16 -05003114static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003115 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003116{
3117 int idx, eidx;
3118
3119 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3120 return -1;
3121 idx = SUBPAGE_IDX(start);
3122 eidx = SUBPAGE_IDX(end);
3123#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003124 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003125 mmio, start, end, idx, eidx, memory);
3126#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003127 if (memory_region_is_ram(phys_sections[section].mr)) {
3128 MemoryRegionSection new_section = phys_sections[section];
3129 new_section.mr = &io_mem_subpage_ram;
3130 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003131 }
blueswir1db7b5422007-05-26 17:36:03 +00003132 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003133 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003134 }
3135
3136 return 0;
3137}

static subpage_t *subpage_init(target_phys_addr_t base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}
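
#if 0
/* Sketch (illustrative, hypothetical indices): carving a small MMIO window
 * out of an otherwise-unassigned page.  Real callers sit earlier in this
 * file, where sections that are not page aligned get registered. */
static void subpage_sketch(target_phys_addr_t page_base, uint16_t mmio_section)
{
    subpage_t *sp = subpage_init(page_base & TARGET_PAGE_MASK);

    /* bytes 0x100..0x2ff of the page now dispatch to mmio_section; the
     * rest keeps the unassigned default set by subpage_init() */
    subpage_register(sp, 0x100, 0x2ff, mmio_section);
}
#endif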

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(target_phys_addr_t index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}
3188
Avi Kivity50c1e142012-02-08 21:36:02 +02003189static void core_begin(MemoryListener *listener)
3190{
Avi Kivity54688b12012-02-09 17:34:32 +02003191 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003192 phys_sections_clear();
Avi Kivityc19e8802012-02-13 20:25:31 +02003193 phys_map.ptr = PHYS_MAP_NODE_NIL;
Avi Kivity5312bd82012-02-12 18:32:55 +02003194 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02003195 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3196 phys_section_rom = dummy_section(&io_mem_rom);
3197 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02003198}
3199
3200static void core_commit(MemoryListener *listener)
3201{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003202 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02003203
3204 /* since each CPU stores ram addresses in its TLB cache, we must
3205 reset the modified entries */
3206 /* XXX: slow ! */
3207 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3208 tlb_flush(env, 1);
3209 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003210}
3211
Avi Kivity93632742012-02-08 16:54:16 +02003212static void core_region_add(MemoryListener *listener,
3213 MemoryRegionSection *section)
3214{
Avi Kivity4855d412012-02-08 21:16:05 +02003215 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003216}
3217
3218static void core_region_del(MemoryListener *listener,
3219 MemoryRegionSection *section)
3220{
Avi Kivity93632742012-02-08 16:54:16 +02003221}
3222
Avi Kivity50c1e142012-02-08 21:36:02 +02003223static void core_region_nop(MemoryListener *listener,
3224 MemoryRegionSection *section)
3225{
Avi Kivity54688b12012-02-09 17:34:32 +02003226 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003227}
3228
Avi Kivity93632742012-02-08 16:54:16 +02003229static void core_log_start(MemoryListener *listener,
3230 MemoryRegionSection *section)
3231{
3232}
3233
3234static void core_log_stop(MemoryListener *listener,
3235 MemoryRegionSection *section)
3236{
3237}
3238
3239static void core_log_sync(MemoryListener *listener,
3240 MemoryRegionSection *section)
3241{
3242}
3243
3244static void core_log_global_start(MemoryListener *listener)
3245{
3246 cpu_physical_memory_set_dirty_tracking(1);
3247}
3248
3249static void core_log_global_stop(MemoryListener *listener)
3250{
3251 cpu_physical_memory_set_dirty_tracking(0);
3252}
3253
3254static void core_eventfd_add(MemoryListener *listener,
3255 MemoryRegionSection *section,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02003256 bool match_data, uint64_t data, EventNotifier *e)
Avi Kivity93632742012-02-08 16:54:16 +02003257{
3258}
3259
3260static void core_eventfd_del(MemoryListener *listener,
3261 MemoryRegionSection *section,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02003262 bool match_data, uint64_t data, EventNotifier *e)
Avi Kivity93632742012-02-08 16:54:16 +02003263{
3264}
3265
Avi Kivity50c1e142012-02-08 21:36:02 +02003266static void io_begin(MemoryListener *listener)
3267{
3268}
3269
3270static void io_commit(MemoryListener *listener)
3271{
3272}
3273
Avi Kivity4855d412012-02-08 21:16:05 +02003274static void io_region_add(MemoryListener *listener,
3275 MemoryRegionSection *section)
3276{
Avi Kivitya2d33522012-03-05 17:40:12 +02003277 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3278
3279 mrio->mr = section->mr;
3280 mrio->offset = section->offset_within_region;
3281 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02003282 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02003283 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02003284}
3285
3286static void io_region_del(MemoryListener *listener,
3287 MemoryRegionSection *section)
3288{
3289 isa_unassign_ioport(section->offset_within_address_space, section->size);
3290}
3291
Avi Kivity50c1e142012-02-08 21:36:02 +02003292static void io_region_nop(MemoryListener *listener,
3293 MemoryRegionSection *section)
3294{
3295}
3296
Avi Kivity4855d412012-02-08 21:16:05 +02003297static void io_log_start(MemoryListener *listener,
3298 MemoryRegionSection *section)
3299{
3300}
3301
3302static void io_log_stop(MemoryListener *listener,
3303 MemoryRegionSection *section)
3304{
3305}
3306
3307static void io_log_sync(MemoryListener *listener,
3308 MemoryRegionSection *section)
3309{
3310}
3311
3312static void io_log_global_start(MemoryListener *listener)
3313{
3314}
3315
3316static void io_log_global_stop(MemoryListener *listener)
3317{
3318}
3319
3320static void io_eventfd_add(MemoryListener *listener,
3321 MemoryRegionSection *section,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02003322 bool match_data, uint64_t data, EventNotifier *e)
Avi Kivity4855d412012-02-08 21:16:05 +02003323{
3324}
3325
3326static void io_eventfd_del(MemoryListener *listener,
3327 MemoryRegionSection *section,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02003328 bool match_data, uint64_t data, EventNotifier *e)
Avi Kivity4855d412012-02-08 21:16:05 +02003329{
3330}
3331
Avi Kivity93632742012-02-08 16:54:16 +02003332static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003333 .begin = core_begin,
3334 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003335 .region_add = core_region_add,
3336 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003337 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003338 .log_start = core_log_start,
3339 .log_stop = core_log_stop,
3340 .log_sync = core_log_sync,
3341 .log_global_start = core_log_global_start,
3342 .log_global_stop = core_log_global_stop,
3343 .eventfd_add = core_eventfd_add,
3344 .eventfd_del = core_eventfd_del,
3345 .priority = 0,
3346};
3347
Avi Kivity4855d412012-02-08 21:16:05 +02003348static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003349 .begin = io_begin,
3350 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003351 .region_add = io_region_add,
3352 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003353 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003354 .log_start = io_log_start,
3355 .log_stop = io_log_stop,
3356 .log_sync = io_log_sync,
3357 .log_global_start = io_log_global_start,
3358 .log_global_stop = io_log_global_stop,
3359 .eventfd_add = io_eventfd_add,
3360 .eventfd_del = io_eventfd_del,
3361 .priority = 0,
3362};
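
#if 0
/* Sketch (illustrative, hypothetical names): a third listener observing the
 * same address space.  In this version of the API every callback slot is
 * invoked unconditionally -- hence all the empty stubs above -- so a new
 * listener supplies stubs for the events it ignores and is hooked up with:
 *
 *     memory_listener_register(&my_listener, system_memory);
 */
static MemoryListener my_listener = {
    .begin = my_begin,
    .commit = my_commit,
    .region_add = my_region_add,
    .region_del = my_region_del,
    .region_nop = my_region_nop,
    .log_start = my_log_start,
    .log_stop = my_log_stop,
    .log_sync = my_log_sync,
    .log_global_start = my_log_global_start,
    .log_global_stop = my_log_global_stop,
    .eventfd_add = my_eventfd_add,
    .eventfd_del = my_eventfd_del,
    .priority = 10,
};
#endif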

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(target_phys_addr_t addr,
                                     target_phys_addr_t length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                target_phys_addr_t addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                target_phys_addr_t addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
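
#if 0
/* Usage sketch (illustrative): reading and writing a small guest buffer.
 * The cpu_physical_memory_read/write helpers in cpu-common.h are thin
 * wrappers around exactly this call; "guest_pa" is hypothetical. */
static void rw_sketch(target_phys_addr_t guest_pa)
{
    uint8_t buf[16];

    cpu_physical_memory_rw(guest_pa, buf, sizeof(buf), 0);  /* read */
    buf[0] ^= 1;
    cpu_physical_memory_rw(guest_pa, buf, sizeof(buf), 1);  /* write */
}
#endif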

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
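
#if 0
/* Sketch (illustrative): the canonical zero-copy DMA pattern for the pair
 * above.  All names are hypothetical.  Note that *plen may come back
 * smaller than requested, and NULL means "register a map client with
 * cpu_register_map_client() and retry later". */
static void dma_write_sketch(target_phys_addr_t addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1);

    if (!host) {
        return; /* bounce buffer busy; retry from a map client callback */
    }
    /* ... produce up to plen bytes of device data into host ... */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif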

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
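
#if 0
/* Sketch (illustrative): picking the loader that matches a device's
 * declared endianness.  A little-endian register cell -- say a PCI BAR
 * word -- reads the same on every host/target combination with: */
static uint32_t bar_cell_sketch(target_phys_addr_t cell_pa)
{
    return ldl_le_phys(cell_pa);
}
#endif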
3758
bellard84b7b8e2005-11-28 21:19:04 +00003759/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003760static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3761 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003762{
bellard84b7b8e2005-11-28 21:19:04 +00003763 uint8_t *ptr;
3764 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003765 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00003766
Avi Kivity06ef3522012-02-13 16:11:22 +02003767 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003768
Blue Swirlcc5bea62012-04-14 14:56:48 +00003769 if (!(memory_region_is_ram(section->mr) ||
3770 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00003771 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003772 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003773
3774    /* XXX: This path ignores the requested endianness: the two halves
3775       are always combined in target byte order; honour "endian" here. */
bellard84b7b8e2005-11-28 21:19:04 +00003776#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02003777 val = io_mem_read(section->mr, addr, 4) << 32;
3778 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00003779#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02003780 val = io_mem_read(section->mr, addr, 4);
3781 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00003782#endif
3783 } else {
3784 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003785 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003786 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003787 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003788 switch (endian) {
3789 case DEVICE_LITTLE_ENDIAN:
3790 val = ldq_le_p(ptr);
3791 break;
3792 case DEVICE_BIG_ENDIAN:
3793 val = ldq_be_p(ptr);
3794 break;
3795 default:
3796 val = ldq_p(ptr);
3797 break;
3798 }
bellard84b7b8e2005-11-28 21:19:04 +00003799 }
3800 return val;
3801}
3802
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003803uint64_t ldq_phys(target_phys_addr_t addr)
3804{
3805 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3806}
3807
3808uint64_t ldq_le_phys(target_phys_addr_t addr)
3809{
3810 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3811}
3812
3813uint64_t ldq_be_phys(target_phys_addr_t addr)
3814{
3815 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3816}
3817
bellardaab33092005-10-30 20:48:42 +00003818/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003819uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003820{
3821 uint8_t val;
3822 cpu_physical_memory_read(addr, &val, 1);
3823 return val;
3824}
3825
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003826/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003827static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3828 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003829{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003830 uint8_t *ptr;
3831    uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003832 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003833
Avi Kivity06ef3522012-02-13 16:11:22 +02003834 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003835
Blue Swirlcc5bea62012-04-14 14:56:48 +00003836 if (!(memory_region_is_ram(section->mr) ||
3837 memory_region_is_romd(section->mr))) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003838 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003839 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003840 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003841#if defined(TARGET_WORDS_BIGENDIAN)
3842 if (endian == DEVICE_LITTLE_ENDIAN) {
3843 val = bswap16(val);
3844 }
3845#else
3846 if (endian == DEVICE_BIG_ENDIAN) {
3847 val = bswap16(val);
3848 }
3849#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003850 } else {
3851 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003852 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003853 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003854 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003855 switch (endian) {
3856 case DEVICE_LITTLE_ENDIAN:
3857 val = lduw_le_p(ptr);
3858 break;
3859 case DEVICE_BIG_ENDIAN:
3860 val = lduw_be_p(ptr);
3861 break;
3862 default:
3863 val = lduw_p(ptr);
3864 break;
3865 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003866 }
3867 return val;
bellardaab33092005-10-30 20:48:42 +00003868}
3869
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003870uint32_t lduw_phys(target_phys_addr_t addr)
3871{
3872 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3873}
3874
3875uint32_t lduw_le_phys(target_phys_addr_t addr)
3876{
3877 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3878}
3879
3880uint32_t lduw_be_phys(target_phys_addr_t addr)
3881{
3882 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
3883}
3884
bellard8df1cd02005-01-28 22:37:22 +00003885/* warning: addr must be aligned. The RAM page is not marked dirty
3886   and the code inside is not invalidated. This is useful when the dirty
3887   bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05003888void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003889{
bellard8df1cd02005-01-28 22:37:22 +00003890 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003891 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003892
Avi Kivity06ef3522012-02-13 16:11:22 +02003893 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003894
Avi Kivityf3705d52012-03-08 16:16:34 +02003895 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003896 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003897 if (memory_region_is_ram(section->mr)) {
3898 section = &phys_sections[phys_section_rom];
3899 }
3900 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00003901 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003902 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003903 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003904 + memory_region_section_addr(section, addr);
pbrook5579c7f2009-04-11 14:47:08 +00003905 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003906 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003907
3908 if (unlikely(in_migration)) {
3909 if (!cpu_physical_memory_is_dirty(addr1)) {
3910 /* invalidate code */
3911 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3912 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003913 cpu_physical_memory_set_dirty_flags(
3914 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00003915 }
3916 }
bellard8df1cd02005-01-28 22:37:22 +00003917 }
3918}
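/*
 * Sketch of the intended use of stl_phys_notdirty() (illustrative, not
 * part of the original file): a software page-table walker setting an
 * accessed flag in a 32-bit PTE.  The _notdirty variant keeps this
 * bookkeeping store from dirtying the page or invalidating translated
 * code on every walk.  PTE_ACCESSED_FLAG is a hypothetical constant.
 */
#if 0
#define PTE_ACCESSED_FLAG 0x20                   /* hypothetical bit */

static void example_set_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & PTE_ACCESSED_FLAG)) {
        stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED_FLAG);
    }
}
#endif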
3919
Anthony Liguoric227f092009-10-01 16:12:16 -05003920void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00003921{
j_mayerbc98a7e2007-04-04 07:55:12 +00003922 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003923 MemoryRegionSection *section;
j_mayerbc98a7e2007-04-04 07:55:12 +00003924
Avi Kivity06ef3522012-02-13 16:11:22 +02003925 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003926
Avi Kivityf3705d52012-03-08 16:16:34 +02003927 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003928 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003929 if (memory_region_is_ram(section->mr)) {
3930 section = &phys_sections[phys_section_rom];
3931 }
j_mayerbc98a7e2007-04-04 07:55:12 +00003932#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02003933 io_mem_write(section->mr, addr, val >> 32, 4);
3934 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00003935#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02003936 io_mem_write(section->mr, addr, (uint32_t)val, 4);
3937 io_mem_write(section->mr, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00003938#endif
3939 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003940 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003941 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003942 + memory_region_section_addr(section, addr));
j_mayerbc98a7e2007-04-04 07:55:12 +00003943 stq_p(ptr, val);
3944 }
3945}
3946
bellard8df1cd02005-01-28 22:37:22 +00003947/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003948static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
3949 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003950{
bellard8df1cd02005-01-28 22:37:22 +00003951 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003952 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003953
Avi Kivity06ef3522012-02-13 16:11:22 +02003954 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003955
Avi Kivityf3705d52012-03-08 16:16:34 +02003956 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003957 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003958 if (memory_region_is_ram(section->mr)) {
3959 section = &phys_sections[phys_section_rom];
3960 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003961#if defined(TARGET_WORDS_BIGENDIAN)
3962 if (endian == DEVICE_LITTLE_ENDIAN) {
3963 val = bswap32(val);
3964 }
3965#else
3966 if (endian == DEVICE_BIG_ENDIAN) {
3967 val = bswap32(val);
3968 }
3969#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02003970 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00003971 } else {
3972 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003973 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003974 + memory_region_section_addr(section, addr);
bellard8df1cd02005-01-28 22:37:22 +00003975 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003976 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003977 switch (endian) {
3978 case DEVICE_LITTLE_ENDIAN:
3979 stl_le_p(ptr, val);
3980 break;
3981 case DEVICE_BIG_ENDIAN:
3982 stl_be_p(ptr, val);
3983 break;
3984 default:
3985 stl_p(ptr, val);
3986 break;
3987 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003988 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00003989 }
3990}
3991
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003992void stl_phys(target_phys_addr_t addr, uint32_t val)
3993{
3994 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
3995}
3996
3997void stl_le_phys(target_phys_addr_t addr, uint32_t val)
3998{
3999 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4000}
4001
4002void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4003{
4004 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4005}
4006
bellardaab33092005-10-30 20:48:42 +00004007/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004008void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004009{
4010 uint8_t v = val;
4011 cpu_physical_memory_write(addr, &v, 1);
4012}
4013
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004014/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004015static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4016 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004017{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004018 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02004019 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004020
Avi Kivity06ef3522012-02-13 16:11:22 +02004021 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004022
Avi Kivityf3705d52012-03-08 16:16:34 +02004023 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00004024 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02004025 if (memory_region_is_ram(section->mr)) {
4026 section = &phys_sections[phys_section_rom];
4027 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004028#if defined(TARGET_WORDS_BIGENDIAN)
4029 if (endian == DEVICE_LITTLE_ENDIAN) {
4030 val = bswap16(val);
4031 }
4032#else
4033 if (endian == DEVICE_BIG_ENDIAN) {
4034 val = bswap16(val);
4035 }
4036#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02004037 io_mem_write(section->mr, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004038 } else {
4039 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02004040 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00004041 + memory_region_section_addr(section, addr);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004042 /* RAM case */
4043 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004044 switch (endian) {
4045 case DEVICE_LITTLE_ENDIAN:
4046 stw_le_p(ptr, val);
4047 break;
4048 case DEVICE_BIG_ENDIAN:
4049 stw_be_p(ptr, val);
4050 break;
4051 default:
4052 stw_p(ptr, val);
4053 break;
4054 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00004055 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004056 }
bellardaab33092005-10-30 20:48:42 +00004057}
4058
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004059void stw_phys(target_phys_addr_t addr, uint32_t val)
4060{
4061 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4062}
4063
4064void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4065{
4066 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4067}
4068
4069void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4070{
4071 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4072}
4073
bellardaab33092005-10-30 20:48:42 +00004074/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004075void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004076{
4077 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004078 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004079}
4080
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004081void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4082{
4083 val = cpu_to_le64(val);
4084 cpu_physical_memory_write(addr, &val, 8);
4085}
4086
4087void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4088{
4089 val = cpu_to_be64(val);
4090 cpu_physical_memory_write(addr, &val, 8);
4091}
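/*
 * Minimal sketch (illustrative, not part of the original file) of the
 * store side: a device model posting a 64-bit timestamp and a 16-bit
 * status word into guest RAM.  Unlike the _notdirty variants, these
 * stores mark the pages dirty and invalidate overlapping translated
 * code.  The ring address, layout and status value are hypothetical.
 */
#if 0
static void example_post_completion(target_phys_addr_t ring_gpa,
                                    uint64_t timestamp)
{
    stq_le_phys(ring_gpa, timestamp);            /* guest expects LE */
    stw_le_phys(ring_gpa + 8, 0x0001);           /* hypothetical "done" */
}
#endif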
4092
aliguori5e2972f2009-03-28 17:51:36 +00004093/* virtual memory access for debug (includes writing to ROM) */
Andreas Färber9349b4f2012-03-14 01:38:32 +01004094int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004095 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004096{
4097 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004098 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004099 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004100
4101 while (len > 0) {
4102 page = addr & TARGET_PAGE_MASK;
4103 phys_addr = cpu_get_phys_page_debug(env, page);
4104 /* if no physical page mapped, return an error */
4105 if (phys_addr == -1)
4106 return -1;
4107 l = (page + TARGET_PAGE_SIZE) - addr;
4108 if (l > len)
4109 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004110 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004111 if (is_write)
4112 cpu_physical_memory_write_rom(phys_addr, buf, l);
4113 else
aliguori5e2972f2009-03-28 17:51:36 +00004114 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004115 len -= l;
4116 buf += l;
4117 addr += l;
4118 }
4119 return 0;
4120}
Paul Brooka68fe892010-03-01 00:08:59 +00004121#endif
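/*
 * Usage sketch (illustrative, not part of the original file):
 * cpu_memory_rw_debug() is what the gdb stub uses, since it walks the
 * stopped guest's virtual address space and may write even to ROM.  A
 * hypothetical debugger hook reading one word at a guest virtual
 * address:
 */
#if 0
static uint32_t example_debug_peek(CPUArchState *env, target_ulong vaddr)
{
    uint32_t word = 0;

    if (cpu_memory_rw_debug(env, vaddr, (uint8_t *)&word,
                            sizeof(word), 0 /* is_write */) < 0) {
        return 0;                                /* page not mapped */
    }
    return word;
}
#endif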
bellard13eb76e2004-01-24 15:23:36 +00004122
pbrook2e70f6e2008-06-29 01:03:05 +00004123/* In deterministic (icount) execution mode, an instruction that performs
4124   device I/O must be the last instruction of its TB */
Blue Swirl20503962012-04-09 14:20:20 +00004125void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
pbrook2e70f6e2008-06-29 01:03:05 +00004126{
4127 TranslationBlock *tb;
4128 uint32_t n, cflags;
4129 target_ulong pc, cs_base;
4130 uint64_t flags;
4131
Blue Swirl20503962012-04-09 14:20:20 +00004132 tb = tb_find_pc(retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004133 if (!tb) {
4134 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
Blue Swirl20503962012-04-09 14:20:20 +00004135 (void *)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004136 }
4137 n = env->icount_decr.u16.low + tb->icount;
Blue Swirl20503962012-04-09 14:20:20 +00004138 cpu_restore_state(tb, env, retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004139 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004140 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004141 n = n - env->icount_decr.u16.low;
4142 /* Generate a new TB ending on the I/O insn. */
4143 n++;
4144 /* On MIPS and SH, delay slot instructions can only be restarted if
4145 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004146 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004147 branch. */
4148#if defined(TARGET_MIPS)
4149 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4150 env->active_tc.PC -= 4;
4151 env->icount_decr.u16.low++;
4152 env->hflags &= ~MIPS_HFLAG_BMASK;
4153 }
4154#elif defined(TARGET_SH4)
4155 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4156 && n > 1) {
4157 env->pc -= 2;
4158 env->icount_decr.u16.low++;
4159 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4160 }
4161#endif
4162 /* This should never happen. */
4163 if (n > CF_COUNT_MASK)
4164 cpu_abort(env, "TB too big during recompile");
4165
4166 cflags = n | CF_LAST_IO;
4167 pc = tb->pc;
4168 cs_base = tb->cs_base;
4169 flags = tb->flags;
4170 tb_phys_invalidate(tb, -1);
4171 /* FIXME: In theory this could raise an exception. In practice
4172 we have already translated the block once so it's probably ok. */
4173 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004174 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004175 the first in the TB) then we end up generating a whole new TB and
4176 repeating the fault, which is horribly inefficient.
4177 Better would be to execute just this insn uncached, or generate a
4178 second new TB. */
4179 cpu_resume_from_signal(env, NULL);
4180}
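/*
 * Worked example (not from the original source): with icount enabled,
 * suppose a five-insn TB performs a device read at its third insn.
 * Device I/O is only permitted as the last insn of a TB, so
 * cpu_io_recompile() computes n = 3, invalidates the old TB and
 * regenerates it with cflags = 3 | CF_LAST_IO; on re-execution the new
 * TB ends exactly at the I/O insn, keeping instruction counting exact.
 */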
4181
Paul Brookb3755a92010-03-12 16:54:58 +00004182#if !defined(CONFIG_USER_ONLY)
4183
Stefan Weil055403b2010-10-22 23:03:32 +02004184void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004185{
4186 int i, target_code_size, max_target_code_size;
4187 int direct_jmp_count, direct_jmp2_count, cross_page;
4188 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004189
bellarde3db7222005-01-26 22:00:47 +00004190 target_code_size = 0;
4191 max_target_code_size = 0;
4192 cross_page = 0;
4193 direct_jmp_count = 0;
4194 direct_jmp2_count = 0;
4195 for(i = 0; i < nb_tbs; i++) {
4196 tb = &tbs[i];
4197 target_code_size += tb->size;
4198 if (tb->size > max_target_code_size)
4199 max_target_code_size = tb->size;
4200 if (tb->page_addr[1] != -1)
4201 cross_page++;
4202 if (tb->tb_next_offset[0] != 0xffff) {
4203 direct_jmp_count++;
4204 if (tb->tb_next_offset[1] != 0xffff) {
4205 direct_jmp2_count++;
4206 }
4207 }
4208 }
4209    /* XXX: avoid using doubles? */
bellard57fec1f2008-02-01 10:50:11 +00004210 cpu_fprintf(f, "Translation buffer state:\n");
Richard Hendersonf1bc0bc2012-10-16 17:30:10 +10004211 cpu_fprintf(f, "gen code size %td/%zd\n",
bellard26a5f132008-05-28 12:30:31 +00004212 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4213 cpu_fprintf(f, "TB count %d/%d\n",
4214 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004215 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004216 nb_tbs ? target_code_size / nb_tbs : 0,
4217 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004218 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004219 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4220 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004221 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4222 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004223 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4224 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004225 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004226 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4227 direct_jmp2_count,
4228 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004229 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004230 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4231 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4232 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004233 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004234}
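/*
 * Usage note (not part of the original file): this is the backend of
 * the HMP monitor command "info jit", e.g.
 *
 *     (qemu) info jit
 *
 * which reports translation-buffer occupancy, TB counts and the flush/
 * invalidate statistics printed above.
 */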
4235
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00004236/*
4237 * A helper function for the _utterly broken_ virtio device model to find out if
4238 * it's running on a big endian machine. Don't do this at home kids!
4239 */
4240bool virtio_is_big_endian(void);
4241bool virtio_is_big_endian(void)
4242{
4243#if defined(TARGET_WORDS_BIGENDIAN)
4244 return true;
4245#else
4246 return false;
4247#endif
4248}
4249
bellard61382a52003-10-27 21:22:23 +00004250#endif
Wen Congyang76f35532012-05-07 12:04:18 +08004251
4252#ifndef CONFIG_USER_ONLY
4253bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
4254{
4255 MemoryRegionSection *section;
4256
4257 section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);
4258
4259 return !(memory_region_is_ram(section->mr) ||
4260 memory_region_is_romd(section->mr));
4261}
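/*
 * Minimal sketch (illustrative, not part of the original file): a page
 * walker that skips device (non-RAM, non-ROMD) pages, in the spirit of
 * the guest-memory dump code this predicate serves.  The loop and
 * visitor callback are hypothetical.
 */
#if 0
static void example_scan_ram(target_phys_addr_t start,
                             target_phys_addr_t end,
                             void (*visit)(target_phys_addr_t))
{
    target_phys_addr_t addr;

    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_is_io(addr)) {
            continue;                            /* skip device registers */
        }
        visit(addr);
    }
}
#endif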
4262#endif