/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static size_t code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static size_t code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* To optimize self-modifying code, we count the number of write
       accesses to a given page; past a threshold we build a bitmap. */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
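
/* For example (illustrative numbers): with TARGET_PHYS_ADDR_SPACE_BITS
   = 36 and TARGET_PAGE_BITS = 12 this works out to
   ((36 - 12 - 1) / 10) + 1 = 3 levels of 10-bit tables. */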

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
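
/* Worked example (illustrative): with L1_MAP_ADDR_SPACE_BITS = 47 and
   TARGET_PAGE_BITS = 12 there are 35 page-index bits to cover.
   35 % 10 = 5, so V_L1_BITS_REM = 5; since that is >= 4, V_L1_BITS = 5
   and V_L1_SIZE = 32.  V_L1_SHIFT is then 30, i.e. a 5-bit top level
   over three full 10-bit levels: 5 + 10 + 10 + 10 = 35 bits. */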

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
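
/* Note: the nil marker is the all-ones 15-bit pattern (0x7fff), so a
   valid node index can never collide with it; phys_map_node_alloc()
   asserts this below. */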

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
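
/* The recursion above registers pages [*index, *index + *nb): at each
   level one entry spans 'step' pages, so any step-aligned chunk of at
   least 'step' pages is recorded as a single leaf at that level, and
   only the misaligned head and tail of the range descend a level.  A
   leaf found above level 0 therefore stands for a whole aligned block
   of pages mapped to the same section. */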

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
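
/* A lookup thus touches at most P_L2_LEVELS entries, and an address
   outside every registered section resolves to the
   phys_section_unassigned placeholder rather than to NULL. */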

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode; this will change once a dedicated libc is used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
     || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
     || defined(__DragonFly__) || defined(__OpenBSD__) \
     || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    code_gen_buffer_size = tb_size;
    return tb_size;
}
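
/* Example: a zero request (the default when no explicit size is given,
   e.g. via the -tb-size command-line option) picks
   DEFAULT_CODE_GEN_BUFFER_SIZE, already clamped against the per-host
   maximum at compile time; an explicit request is clamped into
   [MIN_CODE_GEN_BUFFER_SIZE, MAX_CODE_GEN_BUFFER_SIZE] at run time. */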

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (code_gen_buffer_size > 800u * 1024 * 1024) {
        code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(code_gen_buffer_size);
    if (buf) {
        map_exec(buf, code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
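
/* Note: of the three variants above, only the mmap one can position the
   buffer in the address space; the static and malloc variants rely on
   map_exec() to flip on PROT_EXEC wherever the buffer happens to be. */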

static inline void code_gen_alloc(size_t tb_size)
{
    code_gen_buffer_size = size_code_gen_buffer(tb_size);
    code_gen_buffer = alloc_code_gen_buffer();
    if (code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
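
/* Note: tb_free() can only reclaim code space for the most recently
   generated TB; anything older stays allocated until the next
   tb_flush(). */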

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
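
/* Note on the encoding used by the jump lists: the low two bits of each
   list pointer tag which of the referring TB's two jump slots (0 or 1)
   the link came from, and the tag value 2 marks the list head kept in
   jmp_first (see the "fail safe" reset in tb_phys_invalidate() below). */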
923
924/* reset the jump entry 'n' of a TB so that it is not chained to
925 another TB */
926static inline void tb_reset_jump(TranslationBlock *tb, int n)
927{
Stefan Weil8efe0ca2012-04-12 15:42:19 +0200928 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
bellardd4e81642003-05-25 16:46:15 +0000929}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
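
/* Example: set_bits(tab, 3, 7) marks bits 3..9, ORing 0xf8 into tab[0]
   and 0x03 into tab[1]; the first branch above handles runs that stay
   within one byte, the second handles byte-crossing runs. */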

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do this to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory; this ensures that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
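
/* Note: thanks to the code bitmap, a write that touches only bytes of a
   page that hold no translated code falls through without invalidating
   anything; that is the common case for data stores into a page that
   also contains code. */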

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
1288 /* we generate a block containing just the instruction
1289 modifying the memory; this ensures that the block
1290 cannot modify itself */
bellardea1c1802004-06-14 18:56:36 +00001291 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001292 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001293 cpu_resume_from_signal(env, puc);
1294 }
1295#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001296}
bellard9fa3e852004-01-04 18:06:42 +00001297#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001298
1299/* add the tb to the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001300static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001301 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001302{
1303 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001304#ifndef CONFIG_USER_ONLY
1305 bool page_already_protected;
1306#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001307
bellard9fa3e852004-01-04 18:06:42 +00001308 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001309 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001310 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001311#ifndef CONFIG_USER_ONLY
1312 page_already_protected = p->first_tb != NULL;
1313#endif
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001314 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
bellard9fa3e852004-01-04 18:06:42 +00001315 invalidate_page_bitmap(p);
1316
bellard107db442004-06-22 18:48:46 +00001317#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001318
bellard9fa3e852004-01-04 18:06:42 +00001319#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001320 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001321 target_ulong addr;
1322 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001323 int prot;
1324
bellardfd6ce8f2003-05-14 19:00:11 +00001325 /* force the host page to be non-writable (writes will have a
1326 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001327 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001328 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001329 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1330 addr += TARGET_PAGE_SIZE) {
1331
1332 p2 = page_find (addr >> TARGET_PAGE_BITS);
1333 if (!p2)
1334 continue;
1335 prot |= p2->flags;
1336 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001337 }
ths5fafdf22007-09-16 21:08:06 +00001338 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001339 (prot & PAGE_BITS) & ~PAGE_WRITE);
1340#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001341 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001342 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001343#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001344 }
bellard9fa3e852004-01-04 18:06:42 +00001345#else
1346 /* if some code is already present, then the pages are already
1347 protected. So we handle the case where only the first TB is
1348 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001349 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001350 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001351 }
1352#endif
bellardd720b932004-04-25 17:57:43 +00001353
1354#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001355}
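/* Note on the pointer tagging used above: a TB can span two pages, so
   each page keeps its own singly linked list through tb->page_next[n].
   Which of the two links to follow is encoded in the low bits of the
   pointer itself, which are free because TranslationBlock allocations
   are at least 4-byte aligned:

       p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
       n = (uintptr_t)tb & 3;
       tb = (TranslationBlock *)((uintptr_t)tb & ~3);

   so "n" recovers the page index and the mask recovers the pointer.
   The tag value 2 (see tb_link_page() below) marks a list head. */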
1356
bellard9fa3e852004-01-04 18:06:42 +00001357/* add a new TB and link it to the physical page tables. phys_page2 is
1358 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001359void tb_link_page(TranslationBlock *tb,
1360 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001361{
bellard9fa3e852004-01-04 18:06:42 +00001362 unsigned int h;
1363 TranslationBlock **ptb;
1364
pbrookc8a706f2008-06-02 16:16:42 +00001365 /* Grab the mmap lock to stop another thread invalidating this TB
1366 before we are done. */
1367 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001368 /* add in the physical hash table */
1369 h = tb_phys_hash_func(phys_pc);
1370 ptb = &tb_phys_hash[h];
1371 tb->phys_hash_next = *ptb;
1372 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001373
1374 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001375 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1376 if (phys_page2 != -1)
1377 tb_alloc_page(tb, 1, phys_page2);
1378 else
1379 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001380
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001381 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
bellardd4e81642003-05-25 16:46:15 +00001382 tb->jmp_next[0] = NULL;
1383 tb->jmp_next[1] = NULL;
1384
1385 /* init original jump addresses */
1386 if (tb->tb_next_offset[0] != 0xffff)
1387 tb_reset_jump(tb, 0);
1388 if (tb->tb_next_offset[1] != 0xffff)
1389 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001390
1391#ifdef DEBUG_TB_CHECK
1392 tb_page_check();
1393#endif
pbrookc8a706f2008-06-02 16:16:42 +00001394 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001395}
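/* A minimal sketch of the expected calling sequence (tb_gen_code()
   earlier in this file is the real call site; the sketch just mirrors
   it):

       tb = tb_alloc(pc);
       ...fill in tb and generate host code at tb->tc_ptr...
       tb_link_page(tb, phys_pc, phys_page2);

   with phys_page2 == -1 unless the translated guest code crosses a
   page boundary. */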
1396
bellarda513fe12003-05-27 23:29:48 +00001397/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1398 tb[1].tc_ptr. Return NULL if not found */
Stefan Weil6375e092012-04-06 22:26:15 +02001399TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
bellarda513fe12003-05-27 23:29:48 +00001400{
1401 int m_min, m_max, m;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001402 uintptr_t v;
bellarda513fe12003-05-27 23:29:48 +00001403 TranslationBlock *tb;
1404
1405 if (nb_tbs <= 0)
1406 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001407 if (tc_ptr < (uintptr_t)code_gen_buffer ||
1408 tc_ptr >= (uintptr_t)code_gen_ptr) {
bellarda513fe12003-05-27 23:29:48 +00001409 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001410 }
bellarda513fe12003-05-27 23:29:48 +00001411 /* binary search (cf Knuth) */
1412 m_min = 0;
1413 m_max = nb_tbs - 1;
1414 while (m_min <= m_max) {
1415 m = (m_min + m_max) >> 1;
1416 tb = &tbs[m];
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001417 v = (uintptr_t)tb->tc_ptr;
bellarda513fe12003-05-27 23:29:48 +00001418 if (v == tc_ptr)
1419 return tb;
1420 else if (tc_ptr < v) {
1421 m_max = m - 1;
1422 } else {
1423 m_min = m + 1;
1424 }
ths5fafdf22007-09-16 21:08:06 +00001425 }
bellarda513fe12003-05-27 23:29:48 +00001426 return &tbs[m_max];
1427}
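/* The binary search above is valid because TBs are carved out of
   code_gen_buffer in strictly increasing address order, so tc_ptr is
   monotonic across tbs[0..nb_tbs-1]; for any tc_ptr inside generated
   code the loop converges on the unique block whose code contains it
   and returns &tbs[m_max]. */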
bellard75012672003-06-21 13:11:07 +00001428
bellardea041c02003-06-25 16:16:50 +00001429static void tb_reset_jump_recursive(TranslationBlock *tb);
1430
1431static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1432{
1433 TranslationBlock *tb1, *tb_next, **ptb;
1434 unsigned int n1;
1435
1436 tb1 = tb->jmp_next[n];
1437 if (tb1 != NULL) {
1438 /* find head of list */
1439 for(;;) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001440 n1 = (uintptr_t)tb1 & 3;
1441 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001442 if (n1 == 2)
1443 break;
1444 tb1 = tb1->jmp_next[n1];
1445 }
1446 /* we are now sure that tb jumps to tb1 */
1447 tb_next = tb1;
1448
1449 /* remove tb from the jmp_first list */
1450 ptb = &tb_next->jmp_first;
1451 for(;;) {
1452 tb1 = *ptb;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001453 n1 = (uintptr_t)tb1 & 3;
1454 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001455 if (n1 == n && tb1 == tb)
1456 break;
1457 ptb = &tb1->jmp_next[n1];
1458 }
1459 *ptb = tb->jmp_next[n];
1460 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001461
bellardea041c02003-06-25 16:16:50 +00001462 /* suppress the jump to next tb in generated code */
1463 tb_reset_jump(tb, n);
1464
bellard01243112004-01-04 15:48:17 +00001465 /* suppress jumps in the tb to which we could have jumped */
bellardea041c02003-06-25 16:16:50 +00001466 tb_reset_jump_recursive(tb_next);
1467 }
1468}
1469
1470static void tb_reset_jump_recursive(TranslationBlock *tb)
1471{
1472 tb_reset_jump_recursive2(tb, 0);
1473 tb_reset_jump_recursive2(tb, 1);
1474}
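/* The two helpers above walk the circular list rooted at jmp_first:
   the low two bits of each link say which outgoing jump of that TB
   points here, and the tag value 2 marks the head.  Unlinking a TB
   therefore means finding the head, locating the predecessor whose
   jmp_next[n] is the TB, splicing it out, and finally patching the
   generated code via tb_reset_jump(). */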
1475
bellard1fddef42005-04-17 19:16:13 +00001476#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001477#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001478static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
Paul Brook94df27f2010-02-28 23:47:45 +00001479{
1480 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1481}
1482#else
Max Filippov1e7855a2012-04-10 02:48:17 +04001483void tb_invalidate_phys_addr(target_phys_addr_t addr)
bellardd720b932004-04-25 17:57:43 +00001484{
Anthony Liguoric227f092009-10-01 16:12:16 -05001485 ram_addr_t ram_addr;
Avi Kivityf3705d52012-03-08 16:16:34 +02001486 MemoryRegionSection *section;
bellardd720b932004-04-25 17:57:43 +00001487
Avi Kivity06ef3522012-02-13 16:11:22 +02001488 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf3705d52012-03-08 16:16:34 +02001489 if (!(memory_region_is_ram(section->mr)
1490 || (section->mr->rom_device && section->mr->readable))) {
Avi Kivity06ef3522012-02-13 16:11:22 +02001491 return;
1492 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001493 ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001494 + memory_region_section_addr(section, addr);
pbrook706cd4b2006-04-08 17:36:21 +00001495 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001496}
Max Filippov1e7855a2012-04-10 02:48:17 +04001497
1498static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
1499{
Max Filippov9d70c4b2012-05-27 20:21:08 +04001500 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
1501 (pc & ~TARGET_PAGE_MASK));
Max Filippov1e7855a2012-04-10 02:48:17 +04001502}
bellardc27004e2005-01-03 23:35:10 +00001503#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001504#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001505
Paul Brookc527ee82010-03-01 03:31:14 +00001506#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001507void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
Paul Brookc527ee82010-03-01 03:31:14 +00001508{
1510}
1511
Andreas Färber9349b4f2012-03-14 01:38:32 +01001512int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
Paul Brookc527ee82010-03-01 03:31:14 +00001513 int flags, CPUWatchpoint **watchpoint)
1514{
1515 return -ENOSYS;
1516}
1517#else
pbrook6658ffb2007-03-16 23:58:11 +00001518/* Add a watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001519int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001520 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001521{
aliguorib4051332008-11-18 20:14:20 +00001522 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001523 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001524
aliguorib4051332008-11-18 20:14:20 +00001525 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
Max Filippov0dc23822012-01-29 03:15:23 +04001526 if ((len & (len - 1)) || (addr & ~len_mask) ||
1527 len == 0 || len > TARGET_PAGE_SIZE) {
aliguorib4051332008-11-18 20:14:20 +00001528 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1529 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1530 return -EINVAL;
1531 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001532 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001533
aliguoria1d1bb32008-11-18 20:07:32 +00001534 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001535 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001536 wp->flags = flags;
1537
aliguori2dc9f412008-11-18 20:56:59 +00001538 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001539 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001540 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001541 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001542 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001543
pbrook6658ffb2007-03-16 23:58:11 +00001544 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001545
1546 if (watchpoint)
1547 *watchpoint = wp;
1548 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001549}
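/* Hypothetical usage sketch (the gdb stub is the real caller): watch
   four bytes at 'addr' for writes and keep a handle for later removal.
   The BP_* flags are the existing constants; everything else is made
   up for illustration:

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4,
                                 BP_GDB | BP_MEM_WRITE, &wp) < 0) {
           ...len was not a power of two or addr was misaligned...
       }
       ...
       cpu_watchpoint_remove_by_ref(env, wp);
*/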
1550
aliguoria1d1bb32008-11-18 20:07:32 +00001551/* Remove a specific watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001552int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001553 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001554{
aliguorib4051332008-11-18 20:14:20 +00001555 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001556 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001557
Blue Swirl72cf2d42009-09-12 07:36:22 +00001558 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001559 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001560 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001561 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001562 return 0;
1563 }
1564 }
aliguoria1d1bb32008-11-18 20:07:32 +00001565 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001566}
1567
aliguoria1d1bb32008-11-18 20:07:32 +00001568/* Remove a specific watchpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001569void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
aliguoria1d1bb32008-11-18 20:07:32 +00001570{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001571 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001572
aliguoria1d1bb32008-11-18 20:07:32 +00001573 tlb_flush_page(env, watchpoint->vaddr);
1574
Anthony Liguori7267c092011-08-20 22:09:37 -05001575 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001576}
1577
aliguoria1d1bb32008-11-18 20:07:32 +00001578/* Remove all matching watchpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001579void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001580{
aliguoric0ce9982008-11-25 22:13:57 +00001581 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001582
Blue Swirl72cf2d42009-09-12 07:36:22 +00001583 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001584 if (wp->flags & mask)
1585 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001586 }
aliguoria1d1bb32008-11-18 20:07:32 +00001587}
Paul Brookc527ee82010-03-01 03:31:14 +00001588#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001589
1590/* Add a breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001591int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +00001592 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001593{
bellard1fddef42005-04-17 19:16:13 +00001594#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001595 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001596
Anthony Liguori7267c092011-08-20 22:09:37 -05001597 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001598
1599 bp->pc = pc;
1600 bp->flags = flags;
1601
aliguori2dc9f412008-11-18 20:56:59 +00001602 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001603 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001604 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001605 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001606 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001607
1608 breakpoint_invalidate(env, pc);
1609
1610 if (breakpoint)
1611 *breakpoint = bp;
1612 return 0;
1613#else
1614 return -ENOSYS;
1615#endif
1616}
1617
1618/* Remove a specific breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001619int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +00001620{
1621#if defined(TARGET_HAS_ICE)
1622 CPUBreakpoint *bp;
1623
Blue Swirl72cf2d42009-09-12 07:36:22 +00001624 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001625 if (bp->pc == pc && bp->flags == flags) {
1626 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001627 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001628 }
bellard4c3a88a2003-07-26 12:06:08 +00001629 }
aliguoria1d1bb32008-11-18 20:07:32 +00001630 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001631#else
aliguoria1d1bb32008-11-18 20:07:32 +00001632 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001633#endif
1634}
1635
aliguoria1d1bb32008-11-18 20:07:32 +00001636/* Remove a specific breakpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001637void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001638{
bellard1fddef42005-04-17 19:16:13 +00001639#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001640 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001641
aliguoria1d1bb32008-11-18 20:07:32 +00001642 breakpoint_invalidate(env, breakpoint->pc);
1643
Anthony Liguori7267c092011-08-20 22:09:37 -05001644 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001645#endif
1646}
1647
1648/* Remove all matching breakpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001649void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001650{
1651#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001652 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001653
Blue Swirl72cf2d42009-09-12 07:36:22 +00001654 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001655 if (bp->flags & mask)
1656 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001657 }
bellard4c3a88a2003-07-26 12:06:08 +00001658#endif
1659}
1660
bellardc33a3462003-07-29 20:50:33 +00001661/* enable or disable single step mode. EXCP_DEBUG is returned by the
1662 CPU loop after each instruction */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001663void cpu_single_step(CPUArchState *env, int enabled)
bellardc33a3462003-07-29 20:50:33 +00001664{
bellard1fddef42005-04-17 19:16:13 +00001665#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001666 if (env->singlestep_enabled != enabled) {
1667 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001668 if (kvm_enabled())
1669 kvm_update_guest_debug(env, 0);
1670 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001671 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001672 /* XXX: only flush what is necessary */
1673 tb_flush(env);
1674 }
bellardc33a3462003-07-29 20:50:33 +00001675 }
1676#endif
1677}
1678
Andreas Färber9349b4f2012-03-14 01:38:32 +01001679static void cpu_unlink_tb(CPUArchState *env)
bellardea041c02003-06-25 16:16:50 +00001680{
pbrookd5975362008-06-07 20:50:51 +00001681 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1682 problem and hope the cpu will stop of its own accord. For userspace
1683 emulation this often isn't actually as bad as it sounds. Often
1684 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001685 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001686 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001687
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001688 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001689 tb = env->current_tb;
1690 /* if the cpu is currently executing code, we must unlink it and
1691 all the potentially executing TBs */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001692 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001693 env->current_tb = NULL;
1694 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001695 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001696 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001697}
1698
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001699#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001700/* mask must never be zero, except for A20 change call */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001701static void tcg_handle_interrupt(CPUArchState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001702{
1703 int old_mask;
1704
1705 old_mask = env->interrupt_request;
1706 env->interrupt_request |= mask;
1707
aliguori8edac962009-04-24 18:03:45 +00001708 /*
1709 * If called from iothread context, wake the target cpu in
1710 * case it's halted.
1711 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001712 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001713 qemu_cpu_kick(env);
1714 return;
1715 }
aliguori8edac962009-04-24 18:03:45 +00001716
pbrook2e70f6e2008-06-29 01:03:05 +00001717 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001718 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001719 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001720 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001721 cpu_abort(env, "Raised interrupt while not in I/O function");
1722 }
pbrook2e70f6e2008-06-29 01:03:05 +00001723 } else {
aurel323098dba2009-03-07 21:28:24 +00001724 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001725 }
1726}
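/* With -icount the deterministic instruction counter, rather than TB
   unchaining, is what stops the CPU: forcing icount_decr.u16.high to
   0xffff makes the combined 32-bit counter go negative at the next TB
   boundary, so execution drops back into the main loop without
   touching the chained TBs. */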
1727
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001728CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1729
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001730#else /* CONFIG_USER_ONLY */
1731
Andreas Färber9349b4f2012-03-14 01:38:32 +01001732void cpu_interrupt(CPUArchState *env, int mask)
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001733{
1734 env->interrupt_request |= mask;
1735 cpu_unlink_tb(env);
1736}
1737#endif /* CONFIG_USER_ONLY */
1738
Andreas Färber9349b4f2012-03-14 01:38:32 +01001739void cpu_reset_interrupt(CPUArchState *env, int mask)
bellardb54ad042004-05-20 13:42:52 +00001740{
1741 env->interrupt_request &= ~mask;
1742}
1743
Andreas Färber9349b4f2012-03-14 01:38:32 +01001744void cpu_exit(CPUArchState *env)
aurel323098dba2009-03-07 21:28:24 +00001745{
1746 env->exit_request = 1;
1747 cpu_unlink_tb(env);
1748}
1749
Andreas Färber9349b4f2012-03-14 01:38:32 +01001750void cpu_abort(CPUArchState *env, const char *fmt, ...)
bellard75012672003-06-21 13:11:07 +00001751{
1752 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001753 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001754
1755 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001756 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001757 fprintf(stderr, "qemu: fatal: ");
1758 vfprintf(stderr, fmt, ap);
1759 fprintf(stderr, "\n");
Peter Maydell6fd2a022012-10-05 15:04:43 +01001760 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori93fcfe32009-01-15 22:34:14 +00001761 if (qemu_log_enabled()) {
1762 qemu_log("qemu: fatal: ");
1763 qemu_log_vprintf(fmt, ap2);
1764 qemu_log("\n");
Peter Maydell6fd2a022012-10-05 15:04:43 +01001765 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori31b1a7b2009-01-15 22:35:09 +00001766 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001767 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001768 }
pbrook493ae1f2007-11-23 16:53:59 +00001769 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001770 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001771#if defined(CONFIG_USER_ONLY)
1772 {
1773 struct sigaction act;
1774 sigfillset(&act.sa_mask);
1775 act.sa_handler = SIG_DFL;
1776 sigaction(SIGABRT, &act, NULL);
1777 }
1778#endif
bellard75012672003-06-21 13:11:07 +00001779 abort();
1780}
1781
Andreas Färber9349b4f2012-03-14 01:38:32 +01001782CPUArchState *cpu_copy(CPUArchState *env)
thsc5be9f02007-02-28 20:20:53 +00001783{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001784 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1785 CPUArchState *next_cpu = new_env->next_cpu;
thsc5be9f02007-02-28 20:20:53 +00001786 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001787#if defined(TARGET_HAS_ICE)
1788 CPUBreakpoint *bp;
1789 CPUWatchpoint *wp;
1790#endif
1791
Andreas Färber9349b4f2012-03-14 01:38:32 +01001792 memcpy(new_env, env, sizeof(CPUArchState));
aliguori5a38f082009-01-15 20:16:51 +00001793
1794 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001795 new_env->next_cpu = next_cpu;
1796 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001797
1798 /* Clone all break/watchpoints.
1799 Note: Once we support ptrace with hw-debug register access, make sure
1800 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001801 QTAILQ_INIT(&env->breakpoints);
1802 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001803#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001804 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001805 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1806 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001807 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001808 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1809 wp->flags, NULL);
1810 }
1811#endif
1812
thsc5be9f02007-02-28 20:20:53 +00001813 return new_env;
1814}
1815
bellard01243112004-01-04 15:48:17 +00001816#if !defined(CONFIG_USER_ONLY)
Blue Swirl0cac1b62012-04-09 16:50:52 +00001817void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
edgar_igl5c751e92008-05-06 08:44:21 +00001818{
1819 unsigned int i;
1820
1821 /* Discard jump cache entries for any tb which might potentially
1822 overlap the flushed page. */
1823 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1824 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001825 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001826
1827 i = tb_jmp_cache_hash_page(addr);
1828 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001829 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001830}
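/* Two probes are needed because the jump cache is hashed by a TB's
   start address: a TB that begins on the preceding page may straddle
   into the flushed one, so the entries for that page are dropped as
   well. */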
1831
Juan Quintelad24981d2012-05-22 00:42:40 +02001832static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
1833 uintptr_t length)
bellard1ccde1c2004-02-06 19:46:14 +00001834{
Juan Quintelad24981d2012-05-22 00:42:40 +02001835 uintptr_t start1;
bellardf23db162005-08-21 19:12:28 +00001836
bellard1ccde1c2004-02-06 19:46:14 +00001837 /* we modify the TLB cache so that the dirty bit will be set again
1838 when accessing the range */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001839 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02001840 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00001841 address comparisons below. */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001842 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00001843 != (end - 1) - start) {
1844 abort();
1845 }
Blue Swirle5548612012-04-21 13:08:33 +00001846 cpu_tlb_reset_dirty_all(start1, length);
Juan Quintelad24981d2012-05-22 00:42:40 +02001847
1848}
1849
1850/* Note: start and end must be within the same ram block. */
1851void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1852 int dirty_flags)
1853{
1854 uintptr_t length;
1855
1856 start &= TARGET_PAGE_MASK;
1857 end = TARGET_PAGE_ALIGN(end);
1858
1859 length = end - start;
1860 if (length == 0)
1861 return;
1862 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1863
1864 if (tcg_enabled()) {
1865 tlb_reset_dirty_range_all(start, end, length);
1866 }
bellard1ccde1c2004-02-06 19:46:14 +00001867}
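/* Sketch of the intended calling pattern (migration and the VGA code
   are the real users; the helper and flag belong to the existing
   dirty-tracking API, but the surrounding logic is invented for
   illustration):

       if (cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
                                         MIGRATION_DIRTY_FLAG)) {
           ...send the page...
           cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
                                           MIGRATION_DIRTY_FLAG);
       }
*/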
1868
aliguori74576192008-10-06 14:02:03 +00001869int cpu_physical_memory_set_dirty_tracking(int enable)
1870{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001871 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00001872 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001873 return ret;
aliguori74576192008-10-06 14:02:03 +00001874}
1875
Blue Swirle5548612012-04-21 13:08:33 +00001876target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
1877 MemoryRegionSection *section,
1878 target_ulong vaddr,
1879 target_phys_addr_t paddr,
1880 int prot,
1881 target_ulong *address)
1882{
1883 target_phys_addr_t iotlb;
1884 CPUWatchpoint *wp;
1885
Blue Swirlcc5bea62012-04-14 14:56:48 +00001886 if (memory_region_is_ram(section->mr)) {
Blue Swirle5548612012-04-21 13:08:33 +00001887 /* Normal RAM. */
1888 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001889 + memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001890 if (!section->readonly) {
1891 iotlb |= phys_section_notdirty;
1892 } else {
1893 iotlb |= phys_section_rom;
1894 }
1895 } else {
1896 /* IO handlers are currently passed a physical address.
1897 It would be nice to pass an offset from the base address
1898 of that region. This would avoid having to special case RAM,
1899 and avoid full address decoding in every device.
1900 We can't use the high bits of pd for this because
1901 IO_MEM_ROMD uses these as a ram address. */
1902 iotlb = section - phys_sections;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001903 iotlb += memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001904 }
1905
1906 /* Make accesses to pages with watchpoints go via the
1907 watchpoint trap routines. */
1908 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1909 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1910 /* Avoid trapping reads of pages with a write breakpoint. */
1911 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1912 iotlb = phys_section_watch + paddr;
1913 *address |= TLB_MMIO;
1914 break;
1915 }
1916 }
1917 }
1918
1919 return iotlb;
1920}
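/* The returned iotlb value is deliberately overloaded: for RAM it is
   the page-aligned ram_addr_t, tagged with phys_section_notdirty or
   phys_section_rom so that writes are still intercepted; for MMIO it
   is an index into phys_sections plus the offset within the region.
   The softmmu slow path in cputlb.c decodes it again accordingly. */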
1921
bellard01243112004-01-04 15:48:17 +00001922#else
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03001923/*
1924 * Walks guest process memory "regions" one by one
1925 * and calls callback function 'fn' for each region.
1926 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001927
1928struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00001929{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001930 walk_memory_regions_fn fn;
1931 void *priv;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001932 uintptr_t start;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001933 int prot;
1934};
bellard9fa3e852004-01-04 18:06:42 +00001935
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001936static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00001937 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001938{
1939 if (data->start != -1ul) {
1940 int rc = data->fn(data->priv, data->start, end, data->prot);
1941 if (rc != 0) {
1942 return rc;
bellard9fa3e852004-01-04 18:06:42 +00001943 }
bellard33417e72003-08-10 21:47:01 +00001944 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001945
1946 data->start = (new_prot ? end : -1ul);
1947 data->prot = new_prot;
1948
1949 return 0;
1950}
1951
1952static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00001953 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001954{
Paul Brookb480d9b2010-03-12 23:23:29 +00001955 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001956 int i, rc;
1957
1958 if (*lp == NULL) {
1959 return walk_memory_regions_end(data, base, 0);
1960 }
1961
1962 if (level == 0) {
1963 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001964 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001965 int prot = pd[i].flags;
1966
1967 pa = base | (i << TARGET_PAGE_BITS);
1968 if (prot != data->prot) {
1969 rc = walk_memory_regions_end(data, pa, prot);
1970 if (rc != 0) {
1971 return rc;
1972 }
1973 }
1974 }
1975 } else {
1976 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001977 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00001978 pa = base | ((abi_ulong)i <<
1979 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001980 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1981 if (rc != 0) {
1982 return rc;
1983 }
1984 }
1985 }
1986
1987 return 0;
1988}
1989
1990int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1991{
1992 struct walk_memory_regions_data data;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001993 uintptr_t i;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001994
1995 data.fn = fn;
1996 data.priv = priv;
1997 data.start = -1ul;
1998 data.prot = 0;
1999
2000 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00002001 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002002 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2003 if (rc != 0) {
2004 return rc;
2005 }
2006 }
2007
2008 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002009}
2010
Paul Brookb480d9b2010-03-12 23:23:29 +00002011static int dump_region(void *priv, abi_ulong start,
2012 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002013{
2014 FILE *f = (FILE *)priv;
2015
Paul Brookb480d9b2010-03-12 23:23:29 +00002016 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2017 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002018 start, end, end - start,
2019 ((prot & PAGE_READ) ? 'r' : '-'),
2020 ((prot & PAGE_WRITE) ? 'w' : '-'),
2021 ((prot & PAGE_EXEC) ? 'x' : '-'));
2022
2023 return (0);
2024}
2025
2026/* dump memory mappings */
2027void page_dump(FILE *f)
2028{
2029 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2030 "start", "end", "size", "prot");
2031 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002032}
2033
pbrook53a59602006-03-25 19:31:22 +00002034int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002035{
bellard9fa3e852004-01-04 18:06:42 +00002036 PageDesc *p;
2037
2038 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002039 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002040 return 0;
2041 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002042}
2043
Richard Henderson376a7902010-03-10 15:57:04 -08002044/* Modify the flags of a page and invalidate the code if necessary.
2045 The flag PAGE_WRITE_ORG is set automatically depending
2046 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002047void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002048{
Richard Henderson376a7902010-03-10 15:57:04 -08002049 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002050
Richard Henderson376a7902010-03-10 15:57:04 -08002051 /* This function should never be called with addresses outside the
2052 guest address space. If this assert fires, it probably indicates
2053 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002054#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2055 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002056#endif
2057 assert(start < end);
2058
bellard9fa3e852004-01-04 18:06:42 +00002059 start = start & TARGET_PAGE_MASK;
2060 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002061
2062 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002063 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002064 }
2065
2066 for (addr = start, len = end - start;
2067 len != 0;
2068 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2069 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2070
2071 /* If the write protection bit is set, then we invalidate
2072 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002073 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002074 (flags & PAGE_WRITE) &&
2075 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002076 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002077 }
2078 p->flags = flags;
2079 }
bellard9fa3e852004-01-04 18:06:42 +00002080}
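/* Hypothetical example: the user-mode mmap emulation marks a freshly
   mapped region like this (target_mmap() in linux-user/mmap.c is the
   real caller, passing the translated prot bits):

       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);

   Setting PAGE_WRITE here also sets PAGE_WRITE_ORG, so a later
   write-protect for translated code can be undone by page_unprotect().
*/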
2081
ths3d97b402007-11-02 19:02:07 +00002082int page_check_range(target_ulong start, target_ulong len, int flags)
2083{
2084 PageDesc *p;
2085 target_ulong end;
2086 target_ulong addr;
2087
Richard Henderson376a7902010-03-10 15:57:04 -08002088 /* This function should never be called with addresses outside the
2089 guest address space. If this assert fires, it probably indicates
2090 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002091#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2092 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002093#endif
2094
Richard Henderson3e0650a2010-03-29 10:54:42 -07002095 if (len == 0) {
2096 return 0;
2097 }
Richard Henderson376a7902010-03-10 15:57:04 -08002098 if (start + len - 1 < start) {
2099 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002100 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002101 }
balrog55f280c2008-10-28 10:24:11 +00002102
ths3d97b402007-11-02 19:02:07 +00002103 end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
2104 start = start & TARGET_PAGE_MASK;
2105
Richard Henderson376a7902010-03-10 15:57:04 -08002106 for (addr = start, len = end - start;
2107 len != 0;
2108 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002109 p = page_find(addr >> TARGET_PAGE_BITS);
2110 if (!p)
2111 return -1;
2112 if (!(p->flags & PAGE_VALID))
2113 return -1;
2114
bellarddae32702007-11-14 10:51:00 +00002115 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002116 return -1;
bellarddae32702007-11-14 10:51:00 +00002117 if (flags & PAGE_WRITE) {
2118 if (!(p->flags & PAGE_WRITE_ORG))
2119 return -1;
2120 /* unprotect the page if it was made read-only because it
2121 contains translated code */
2122 if (!(p->flags & PAGE_WRITE)) {
2123 if (!page_unprotect(addr, 0, NULL))
2124 return -1;
2125 }
2126 return 0;
2127 }
ths3d97b402007-11-02 19:02:07 +00002128 }
2129 return 0;
2130}
2131
bellard9fa3e852004-01-04 18:06:42 +00002132/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002133 page. Return TRUE if the fault was successfully handled. */
Stefan Weil6375e092012-04-06 22:26:15 +02002134int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002135{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002136 unsigned int prot;
2137 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002138 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002139
pbrookc8a706f2008-06-02 16:16:42 +00002140 /* Technically this isn't safe inside a signal handler. However we
2141 know this only ever happens in a synchronous SEGV handler, so in
2142 practice it seems to be ok. */
2143 mmap_lock();
2144
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002145 p = page_find(address >> TARGET_PAGE_BITS);
2146 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002147 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002148 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002149 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002150
bellard9fa3e852004-01-04 18:06:42 +00002151 /* if the page was really writable, then we change its
2152 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002153 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2154 host_start = address & qemu_host_page_mask;
2155 host_end = host_start + qemu_host_page_size;
2156
2157 prot = 0;
2158 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2159 p = page_find(addr >> TARGET_PAGE_BITS);
2160 p->flags |= PAGE_WRITE;
2161 prot |= p->flags;
2162
bellard9fa3e852004-01-04 18:06:42 +00002163 /* and since the content will be modified, we must invalidate
2164 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002165 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002166#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002167 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002168#endif
bellard9fa3e852004-01-04 18:06:42 +00002169 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002170 mprotect((void *)g2h(host_start), qemu_host_page_size,
2171 prot & PAGE_BITS);
2172
2173 mmap_unlock();
2174 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002175 }
pbrookc8a706f2008-06-02 16:16:42 +00002176 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002177 return 0;
2178}
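/* This is the second half of the user-mode self-modifying-code dance:
   tb_alloc_page() mprotect()s pages that contain translated code
   read-only, the resulting SEGV lands here via the signal handler, the
   stale translations are flushed, PAGE_WRITE is restored and the
   faulting write is restarted. */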
bellard9fa3e852004-01-04 18:06:42 +00002179#endif /* defined(CONFIG_USER_ONLY) */
2180
pbrooke2eef172008-06-08 01:09:01 +00002181#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002182
Paul Brookc04b2b72010-03-01 03:31:14 +00002183#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2184typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002185 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002186 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002187 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002188} subpage_t;
2189
Anthony Liguoric227f092009-10-01 16:12:16 -05002190static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002191 uint16_t section);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002192static subpage_t *subpage_init(target_phys_addr_t base);
Avi Kivity5312bd82012-02-12 18:32:55 +02002193static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002194{
Avi Kivity5312bd82012-02-12 18:32:55 +02002195 MemoryRegionSection *section = &phys_sections[section_index];
2196 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002197
2198 if (mr->subpage) {
2199 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2200 memory_region_destroy(&subpage->iomem);
2201 g_free(subpage);
2202 }
2203}
2204
Avi Kivity4346ae32012-02-10 17:00:01 +02002205static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002206{
2207 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002208 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002209
Avi Kivityc19e8802012-02-13 20:25:31 +02002210 if (lp->ptr == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002211 return;
2212 }
2213
Avi Kivityc19e8802012-02-13 20:25:31 +02002214 p = phys_map_nodes[lp->ptr];
Avi Kivity4346ae32012-02-10 17:00:01 +02002215 for (i = 0; i < L2_SIZE; ++i) {
Avi Kivity07f07b32012-02-13 20:45:32 +02002216 if (!p[i].is_leaf) {
Avi Kivity54688b12012-02-09 17:34:32 +02002217 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002218 } else {
Avi Kivityc19e8802012-02-13 20:25:31 +02002219 destroy_page_desc(p[i].ptr);
Avi Kivity54688b12012-02-09 17:34:32 +02002220 }
Avi Kivity54688b12012-02-09 17:34:32 +02002221 }
Avi Kivity07f07b32012-02-13 20:45:32 +02002222 lp->is_leaf = 0;
Avi Kivityc19e8802012-02-13 20:25:31 +02002223 lp->ptr = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002224}
2225
2226static void destroy_all_mappings(void)
2227{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002228 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002229 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002230}
2231
Avi Kivity5312bd82012-02-12 18:32:55 +02002232static uint16_t phys_section_add(MemoryRegionSection *section)
2233{
2234 if (phys_sections_nb == phys_sections_nb_alloc) {
2235 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2236 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2237 phys_sections_nb_alloc);
2238 }
2239 phys_sections[phys_sections_nb] = *section;
2240 return phys_sections_nb++;
2241}
2242
2243static void phys_sections_clear(void)
2244{
2245 phys_sections_nb = 0;
2246}
2247
Avi Kivity0f0cb162012-02-13 17:14:32 +02002248static void register_subpage(MemoryRegionSection *section)
2249{
2250 subpage_t *subpage;
2251 target_phys_addr_t base = section->offset_within_address_space
2252 & TARGET_PAGE_MASK;
Avi Kivityf3705d52012-03-08 16:16:34 +02002253 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002254 MemoryRegionSection subsection = {
2255 .offset_within_address_space = base,
2256 .size = TARGET_PAGE_SIZE,
2257 };
Avi Kivity0f0cb162012-02-13 17:14:32 +02002258 target_phys_addr_t start, end;
2259
Avi Kivityf3705d52012-03-08 16:16:34 +02002260 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002261
Avi Kivityf3705d52012-03-08 16:16:34 +02002262 if (!(existing->mr->subpage)) {
Avi Kivity0f0cb162012-02-13 17:14:32 +02002263 subpage = subpage_init(base);
2264 subsection.mr = &subpage->iomem;
Avi Kivity29990972012-02-13 20:21:20 +02002265 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2266 phys_section_add(&subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02002267 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002268 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002269 }
2270 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Tyler Halladb2a9b2012-07-25 18:45:03 -04002271 end = start + section->size - 1;
Avi Kivity0f0cb162012-02-13 17:14:32 +02002272 subpage_register(subpage, start, end, phys_section_add(section));
2273}
2274
2275
2276static void register_multipage(MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00002277{
Avi Kivitydd811242012-01-02 12:17:03 +02002278 target_phys_addr_t start_addr = section->offset_within_address_space;
2279 ram_addr_t size = section->size;
Avi Kivity29990972012-02-13 20:21:20 +02002280 target_phys_addr_t addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002281 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002282
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002283 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002284
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002285 addr = start_addr;
Avi Kivity29990972012-02-13 20:21:20 +02002286 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2287 section_index);
bellard33417e72003-08-10 21:47:01 +00002288}
2289
Avi Kivity0f0cb162012-02-13 17:14:32 +02002290void cpu_register_physical_memory_log(MemoryRegionSection *section,
2291 bool readonly)
2292{
2293 MemoryRegionSection now = *section, remain = *section;
2294
2295 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2296 || (now.size < TARGET_PAGE_SIZE)) {
2297 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2298 - now.offset_within_address_space,
2299 now.size);
2300 register_subpage(&now);
2301 remain.size -= now.size;
2302 remain.offset_within_address_space += now.size;
2303 remain.offset_within_region += now.size;
2304 }
Tyler Hall69b67642012-07-25 18:45:04 -04002305 while (remain.size >= TARGET_PAGE_SIZE) {
2306 now = remain;
2307 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
2308 now.size = TARGET_PAGE_SIZE;
2309 register_subpage(&now);
2310 } else {
2311 now.size &= TARGET_PAGE_MASK;
2312 register_multipage(&now);
2313 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02002314 remain.size -= now.size;
2315 remain.offset_within_address_space += now.size;
2316 remain.offset_within_region += now.size;
2317 }
2318 now = remain;
2319 if (now.size) {
2320 register_subpage(&now);
2321 }
2322}
2323
2324
Anthony Liguoric227f092009-10-01 16:12:16 -05002325void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002326{
2327 if (kvm_enabled())
2328 kvm_coalesce_mmio_region(addr, size);
2329}
2330
Anthony Liguoric227f092009-10-01 16:12:16 -05002331void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002332{
2333 if (kvm_enabled())
2334 kvm_uncoalesce_mmio_region(addr, size);
2335}
2336
Sheng Yang62a27442010-01-26 19:21:16 +08002337void qemu_flush_coalesced_mmio_buffer(void)
2338{
2339 if (kvm_enabled())
2340 kvm_flush_coalesced_mmio_buffer();
2341}
2342
Marcelo Tosattic9027602010-03-01 20:25:08 -03002343#if defined(__linux__) && !defined(TARGET_S390X)
2344
2345#include <sys/vfs.h>
2346
2347#define HUGETLBFS_MAGIC 0x958458f6
2348
2349static long gethugepagesize(const char *path)
2350{
2351 struct statfs fs;
2352 int ret;
2353
2354 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002355 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002356 } while (ret != 0 && errno == EINTR);
2357
2358 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002359 perror(path);
2360 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002361 }
2362
2363 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002364 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002365
2366 return fs.f_bsize;
2367}
2368
Alex Williamson04b16652010-07-02 11:13:17 -06002369static void *file_ram_alloc(RAMBlock *block,
2370 ram_addr_t memory,
2371 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002372{
2373 char *filename;
2374 void *area;
2375 int fd;
2376#ifdef MAP_POPULATE
2377 int flags;
2378#endif
2379 unsigned long hpagesize;
2380
2381 hpagesize = gethugepagesize(path);
2382 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002383 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002384 }
2385
2386 if (memory < hpagesize) {
2387 return NULL;
2388 }
2389
2390 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2391 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2392 return NULL;
2393 }
2394
2395 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002396 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002397 }
2398
2399 fd = mkstemp(filename);
2400 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002401 perror("unable to create backing store for hugepages");
2402 free(filename);
2403 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002404 }
2405 unlink(filename);
2406 free(filename);
2407
2408 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2409
2410 /*
2411 * ftruncate is not supported by hugetlbfs in older
2412 * hosts, so don't bother bailing out on errors.
2413 * If anything goes wrong with it under other filesystems,
2414 * mmap will fail.
2415 */
2416 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002417 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002418
2419#ifdef MAP_POPULATE
2420 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2421 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2422 * to sidestep this quirk.
2423 */
2424 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2425 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2426#else
2427 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2428#endif
2429 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002430 perror("file_ram_alloc: can't mmap RAM pages");
2431 close(fd);
2432 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002433 }
Alex Williamson04b16652010-07-02 11:13:17 -06002434 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002435 return area;
2436}
2437#endif
2438
Alex Williamsond17b5282010-06-25 11:08:38 -06002439static ram_addr_t find_ram_offset(ram_addr_t size)
2440{
Alex Williamson04b16652010-07-02 11:13:17 -06002441 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002442 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002443
2444 if (QLIST_EMPTY(&ram_list.blocks))
2445 return 0;
2446
2447 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002448 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002449
2450 end = block->offset + block->length;
2451
2452 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2453 if (next_block->offset >= end) {
2454 next = MIN(next, next_block->offset);
2455 }
2456 }
2457 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002458 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002459 mingap = next - end;
2460 }
2461 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002462
2463 if (offset == RAM_ADDR_MAX) {
2464 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2465 (uint64_t)size);
2466 abort();
2467 }
2468
Alex Williamson04b16652010-07-02 11:13:17 -06002469 return offset;
2470}
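/* Worked example of the gap search above: with existing blocks at
   [0x0, 0x8000000) and [0x10000000, 0x18000000) and a request for
   size 0x4000000, the candidate gaps start at 0x8000000 (width
   0x8000000) and at 0x18000000 (unbounded).  Both are large enough,
   but the first is the smaller sufficient gap, so 0x8000000 is
   returned; this keeps RAM blocks packed and fragmentation low. */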
2471
2472static ram_addr_t last_ram_offset(void)
2473{
Alex Williamsond17b5282010-06-25 11:08:38 -06002474 RAMBlock *block;
2475 ram_addr_t last = 0;
2476
2477 QLIST_FOREACH(block, &ram_list.blocks, next)
2478 last = MAX(last, block->offset + block->length);
2479
2480 return last;
2481}
2482
Jason Baronddb97f12012-08-02 15:44:16 -04002483static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
2484{
2485 int ret;
2486 QemuOpts *machine_opts;
2487
2488 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
2489 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2490 if (machine_opts &&
2491 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
2492 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
2493 if (ret) {
2494 perror("qemu_madvise");
2495 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
2496 "but dump_guest_core=off specified\n");
2497 }
2498 }
2499}
2500
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

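/* Illustrative sketch (not called anywhere in this file): pairing
 * qemu_ram_alloc() with qemu_ram_set_idstr().  The region, device and
 * size are hypothetical; most devices reach this path through the
 * higher-level memory_region_init_ram() instead. */
#if 0
static void example_alloc_vram(MemoryRegion *mr, DeviceState *dev)
{
    ram_addr_t offset = qemu_ram_alloc(8 * 1024 * 1024, mr);

    /* give the block a stable name for migration */
    qemu_ram_set_idstr(offset, "vram", dev);
}
#endif
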
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                                                MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC | PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

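/* Illustrative caller (assumed context, e.g. hardware memory-error
 * recovery): discard a poisoned page and rebuild a fresh mapping at the
 * same host virtual address.  The address is hypothetical. */
#if 0
static void example_recover_page(ram_addr_t poisoned_addr)
{
    qemu_ram_remap(poisoned_addr & TARGET_PAGE_MASK, TARGET_PAGE_SIZE);
}
#endif
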
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

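/* Illustrative sketch (hypothetical device RAM): qemu_get_ram_ptr() is
 * only safe for memory the device itself owns, per the comment above. */
#if 0
static void example_clear_vram(ram_addr_t vram_offset)
{
    uint8_t *p = qemu_get_ram_ptr(vram_offset);

    memset(p, 0, 0x1000);   /* device-local access, within the block */
    qemu_put_ram_ptr(p);    /* balances the Xen map-cache reference */
}
#endif
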
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr, but avoids reordering the RAM blocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to the guest's RAM.  Similar to qemu_get_ram_ptr
 * but takes a size argument. */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

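/* Illustrative round trip (hypothetical offset): a host pointer obtained
 * from qemu_get_ram_ptr() translates back to the same ram offset. */
#if 0
static void example_round_trip(ram_addr_t offset)
{
    void *host = qemu_get_ram_ptr(offset);

    assert(qemu_ram_addr_from_host_nofail(host) == offset);
}
#endif
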
static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

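/* Illustrative sketch: these ops back the watchpoints armed with
 * cpu_watchpoint_insert(); a debugger stub might request one like this.
 * The address and length are hypothetical, and the insert API is assumed
 * to match its usual declaration in this tree. */
#if 0
static void example_arm_watchpoint(CPUArchState *env)
{
    cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE, NULL);
}
#endif
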
static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(target_phys_addr_t base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, phys_section_unassigned);

    return mmio;
}

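/* Illustrative scenario (hypothetical device and addresses): subpages
 * only appear when a region does not cover whole target pages, e.g. a
 * small MMIO block mapped at an unaligned guest-physical address. */
#if 0
static void example_map_tiny_mmio(ExampleState *s)
{
    memory_region_init_io(&s->iomem, &example_ops, s, "tiny-mmio", 0x40);
    memory_region_add_subregion(get_system_memory(), 0x10000100, &s->iomem);
    /* 0x40 bytes at 0x10000100 share their page with neighbours, so the
     * core listener routes that page through a subpage_t as above. */
}
#endif
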
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(target_phys_addr_t index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void core_begin(MemoryListener *listener)
{
    destroy_all_mappings();
    phys_sections_clear();
    phys_map.ptr = PHYS_MAP_NODE_NIL;
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void core_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
}

static void core_region_nop(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_log_start(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void core_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void core_eventfd_add(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, EventNotifier *e)
{
}

static void core_eventfd_del(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, EventNotifier *e)
{
}

static void io_begin(MemoryListener *listener)
{
}

static void io_commit(MemoryListener *listener)
{
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static void io_region_nop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void io_log_start(MemoryListener *listener,
                         MemoryRegionSection *section)
{
}

static void io_log_stop(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_sync(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_global_start(MemoryListener *listener)
{
}

static void io_log_global_stop(MemoryListener *listener)
{
}

static void io_eventfd_add(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, EventNotifier *e)
{
}

static void io_eventfd_del(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, EventNotifier *e)
{
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .region_add = core_region_add,
    .region_del = core_region_del,
    .region_nop = core_region_nop,
    .log_start = core_log_start,
    .log_stop = core_log_stop,
    .log_sync = core_log_sync,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .eventfd_add = core_eventfd_add,
    .eventfd_del = core_eventfd_del,
    .priority = 0,
};

static MemoryListener io_memory_listener = {
    .begin = io_begin,
    .commit = io_commit,
    .region_add = io_region_add,
    .region_del = io_region_del,
    .region_nop = io_region_nop,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,
    .priority = 0,
};

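/* Illustrative sketch (hypothetical callbacks): other subsystems can
 * observe the address space the same way.  Note that this snapshot seems
 * to expect every callback to be non-NULL, as the listeners above do. */
#if 0
static void example_listener_init(void)
{
    static MemoryListener example_listener = {
        .region_add = example_region_add,
        .region_del = example_region_del,
        /* ...remaining callbacks... */
        .priority = 10,   /* runs after the core listener */
    };

    memory_listener_register(&example_listener, system_memory);
}
#endif
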
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(target_phys_addr_t addr,
                                     target_phys_addr_t length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                target_phys_addr_t addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                target_phys_addr_t addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

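/* Illustrative usage (hypothetical address): the convenience wrappers
 * cpu_physical_memory_read()/write() are thin layers over the routine
 * above. */
#if 0
static void example_rw(void)
{
    uint8_t buf[4];

    cpu_physical_memory_read(0x1000, buf, sizeof(buf));   /* is_write == 0 */
    cpu_physical_memory_write(0x1000, buf, sizeof(buf));  /* is_write == 1 */
}
#endif
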
/* used for ROM loading: can write to both RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

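/* Illustrative usage (hypothetical image and address): firmware loaders
 * use this instead of cpu_physical_memory_rw() so that write-protected
 * ROM regions still receive their initial contents. */
#if 0
static void example_load_bios(const uint8_t *data, int size)
{
    cpu_physical_memory_write_rom(0xfffc0000, data, size);
}
#endif
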
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

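/* Illustrative DMA pattern (hypothetical device): map as much of the
 * range as possible and back off when the bounce buffer is busy. */
#if 0
static void example_dma_read(target_phys_addr_t addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *buf = cpu_physical_memory_map(addr, &plen, 0);

    if (!buf) {
        /* resources exhausted: cpu_register_map_client() and retry later */
        return;
    }
    /* ... consume plen bytes at buf ... */
    cpu_physical_memory_unmap(buf, plen, 0, plen);
}
#endif
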
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

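/* Illustrative usage (hypothetical register address): a little-endian
 * device register reads the same on any host/target combination. */
#if 0
static uint32_t example_read_le_reg(void)
{
    return ldl_le_phys(0xfee00000);
}
#endif
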
bellard84b7b8e2005-11-28 21:19:04 +00003754/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003755static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3756 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003757{
bellard84b7b8e2005-11-28 21:19:04 +00003758 uint8_t *ptr;
3759 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003760 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00003761
Avi Kivity06ef3522012-02-13 16:11:22 +02003762 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003763
Blue Swirlcc5bea62012-04-14 14:56:48 +00003764 if (!(memory_region_is_ram(section->mr) ||
3765 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00003766 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003767 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003768
3769 /* XXX This is broken when device endian != cpu endian.
3770 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00003771#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02003772 val = io_mem_read(section->mr, addr, 4) << 32;
3773 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00003774#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02003775 val = io_mem_read(section->mr, addr, 4);
3776 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00003777#endif
3778 } else {
3779 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003780 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003781 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003782 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003783 switch (endian) {
3784 case DEVICE_LITTLE_ENDIAN:
3785 val = ldq_le_p(ptr);
3786 break;
3787 case DEVICE_BIG_ENDIAN:
3788 val = ldq_be_p(ptr);
3789 break;
3790 default:
3791 val = ldq_p(ptr);
3792 break;
3793 }
bellard84b7b8e2005-11-28 21:19:04 +00003794 }
3795 return val;
3796}
3797
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003798uint64_t ldq_phys(target_phys_addr_t addr)
3799{
3800 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3801}
3802
3803uint64_t ldq_le_phys(target_phys_addr_t addr)
3804{
3805 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3806}
3807
3808uint64_t ldq_be_phys(target_phys_addr_t addr)
3809{
3810 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3811}
3812
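/* Illustrative sketch ("cfg_addr" is hypothetical): 64-bit guest fields
 * are fetched in one call, but note that on the I/O path the access is
 * issued as two 4-byte io_mem_read() calls, which a device model must
 * tolerate:
 *
 *     uint64_t ring_base = ldq_le_phys(cfg_addr);
 */
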
bellardaab33092005-10-30 20:48:42 +00003813/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003814uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003815{
3816 uint8_t val;
3817 cpu_physical_memory_read(addr, &val, 1);
3818 return val;
3819}
3820
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003821/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003822static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3823 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003824{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003825 uint8_t *ptr;
3826    uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003827 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003828
Avi Kivity06ef3522012-02-13 16:11:22 +02003829 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003830
Blue Swirlcc5bea62012-04-14 14:56:48 +00003831 if (!(memory_region_is_ram(section->mr) ||
3832 memory_region_is_romd(section->mr))) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003833 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003834 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003835 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003836#if defined(TARGET_WORDS_BIGENDIAN)
3837 if (endian == DEVICE_LITTLE_ENDIAN) {
3838 val = bswap16(val);
3839 }
3840#else
3841 if (endian == DEVICE_BIG_ENDIAN) {
3842 val = bswap16(val);
3843 }
3844#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003845 } else {
3846 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003847 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003848 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003849 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003850 switch (endian) {
3851 case DEVICE_LITTLE_ENDIAN:
3852 val = lduw_le_p(ptr);
3853 break;
3854 case DEVICE_BIG_ENDIAN:
3855 val = lduw_be_p(ptr);
3856 break;
3857 default:
3858 val = lduw_p(ptr);
3859 break;
3860 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003861 }
3862 return val;
bellardaab33092005-10-30 20:48:42 +00003863}
3864
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003865uint32_t lduw_phys(target_phys_addr_t addr)
3866{
3867 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3868}
3869
3870uint32_t lduw_le_phys(target_phys_addr_t addr)
3871{
3872 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3873}
3874
3875uint32_t lduw_be_phys(target_phys_addr_t addr)
3876{
3877 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
3878}
3879
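/* Illustrative sketch ("avail_ring_addr" is hypothetical): 16-bit guest
 * structures, e.g. a virtio ring index, go through the width-specific
 * accessor rather than a wider load:
 *
 *     uint16_t avail_idx = (uint16_t)lduw_le_phys(avail_ring_addr + 2);
 */
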
bellard8df1cd02005-01-28 22:37:22 +00003880/* warning: addr must be aligned. The RAM page is not marked as dirty
3881   and the code inside is not invalidated. This is useful when the dirty
3882   bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05003883void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003884{
bellard8df1cd02005-01-28 22:37:22 +00003885 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003886 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003887
Avi Kivity06ef3522012-02-13 16:11:22 +02003888 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003889
Avi Kivityf3705d52012-03-08 16:16:34 +02003890 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003891 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003892 if (memory_region_is_ram(section->mr)) {
3893 section = &phys_sections[phys_section_rom];
3894 }
3895 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00003896 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003897 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003898 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003899 + memory_region_section_addr(section, addr);
pbrook5579c7f2009-04-11 14:47:08 +00003900 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003901 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003902
3903 if (unlikely(in_migration)) {
3904 if (!cpu_physical_memory_is_dirty(addr1)) {
3905 /* invalidate code */
3906 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3907 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003908 cpu_physical_memory_set_dirty_flags(
3909 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00003910 }
3911 }
bellard8df1cd02005-01-28 22:37:22 +00003912 }
3913}
3914
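/* Illustrative sketch (hypothetical helper; PG_ACCESSED_MASK stands in
 * for a target-specific bit): a target MMU that maintains accessed/dirty
 * bits in guest page tables can rewrite a PTE without marking the page
 * dirty, which is exactly what the notdirty variant is for:
 *
 *     static void set_pte_accessed(target_phys_addr_t pte_addr)
 *     {
 *         uint32_t pte = ldl_phys(pte_addr);
 *         stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
 *     }
 */
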
Anthony Liguoric227f092009-10-01 16:12:16 -05003915void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00003916{
j_mayerbc98a7e2007-04-04 07:55:12 +00003917 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003918 MemoryRegionSection *section;
j_mayerbc98a7e2007-04-04 07:55:12 +00003919
Avi Kivity06ef3522012-02-13 16:11:22 +02003920 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003921
Avi Kivityf3705d52012-03-08 16:16:34 +02003922 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003923 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003924 if (memory_region_is_ram(section->mr)) {
3925 section = &phys_sections[phys_section_rom];
3926 }
j_mayerbc98a7e2007-04-04 07:55:12 +00003927#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02003928 io_mem_write(section->mr, addr, val >> 32, 4);
3929 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00003930#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02003931 io_mem_write(section->mr, addr, (uint32_t)val, 4);
3932 io_mem_write(section->mr, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00003933#endif
3934 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003935 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003936 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003937 + memory_region_section_addr(section, addr));
j_mayerbc98a7e2007-04-04 07:55:12 +00003938 stq_p(ptr, val);
3939 }
3940}
3941
bellard8df1cd02005-01-28 22:37:22 +00003942/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003943static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
3944 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003945{
bellard8df1cd02005-01-28 22:37:22 +00003946 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003947 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003948
Avi Kivity06ef3522012-02-13 16:11:22 +02003949 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003950
Avi Kivityf3705d52012-03-08 16:16:34 +02003951 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003952 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003953 if (memory_region_is_ram(section->mr)) {
3954 section = &phys_sections[phys_section_rom];
3955 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003956#if defined(TARGET_WORDS_BIGENDIAN)
3957 if (endian == DEVICE_LITTLE_ENDIAN) {
3958 val = bswap32(val);
3959 }
3960#else
3961 if (endian == DEVICE_BIG_ENDIAN) {
3962 val = bswap32(val);
3963 }
3964#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02003965 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00003966 } else {
3967 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003968 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003969 + memory_region_section_addr(section, addr);
bellard8df1cd02005-01-28 22:37:22 +00003970 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003971 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003972 switch (endian) {
3973 case DEVICE_LITTLE_ENDIAN:
3974 stl_le_p(ptr, val);
3975 break;
3976 case DEVICE_BIG_ENDIAN:
3977 stl_be_p(ptr, val);
3978 break;
3979 default:
3980 stl_p(ptr, val);
3981 break;
3982 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003983 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00003984 }
3985}
3986
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003987void stl_phys(target_phys_addr_t addr, uint32_t val)
3988{
3989 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
3990}
3991
3992void stl_le_phys(target_phys_addr_t addr, uint32_t val)
3993{
3994 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
3995}
3996
3997void stl_be_phys(target_phys_addr_t addr, uint32_t val)
3998{
3999 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4000}
4001
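/* Illustrative sketch ("patch_addr"/"new_insn" are hypothetical): the
 * plain stores go through invalidate_and_set_dirty(), so overwriting
 * memory that may hold translated guest code is safe; any TB derived
 * from that word is invalidated:
 *
 *     stl_phys(patch_addr, new_insn);
 */
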
bellardaab33092005-10-30 20:48:42 +00004002/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004003void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004004{
4005 uint8_t v = val;
4006 cpu_physical_memory_write(addr, &v, 1);
4007}
4008
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004009/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004010static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4011 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004012{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004013 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02004014 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004015
Avi Kivity06ef3522012-02-13 16:11:22 +02004016 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004017
Avi Kivityf3705d52012-03-08 16:16:34 +02004018 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00004019 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02004020 if (memory_region_is_ram(section->mr)) {
4021 section = &phys_sections[phys_section_rom];
4022 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004023#if defined(TARGET_WORDS_BIGENDIAN)
4024 if (endian == DEVICE_LITTLE_ENDIAN) {
4025 val = bswap16(val);
4026 }
4027#else
4028 if (endian == DEVICE_BIG_ENDIAN) {
4029 val = bswap16(val);
4030 }
4031#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02004032 io_mem_write(section->mr, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004033 } else {
4034 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02004035 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00004036 + memory_region_section_addr(section, addr);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004037 /* RAM case */
4038 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004039 switch (endian) {
4040 case DEVICE_LITTLE_ENDIAN:
4041 stw_le_p(ptr, val);
4042 break;
4043 case DEVICE_BIG_ENDIAN:
4044 stw_be_p(ptr, val);
4045 break;
4046 default:
4047 stw_p(ptr, val);
4048 break;
4049 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00004050 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004051 }
bellardaab33092005-10-30 20:48:42 +00004052}
4053
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004054void stw_phys(target_phys_addr_t addr, uint32_t val)
4055{
4056 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4057}
4058
4059void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4060{
4061 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4062}
4063
4064void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4065{
4066 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4067}
4068
bellardaab33092005-10-30 20:48:42 +00004069/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004070void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004071{
4072 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004073 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004074}
4075
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004076void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4077{
4078 val = cpu_to_le64(val);
4079 cpu_physical_memory_write(addr, &val, 8);
4080}
4081
4082void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4083{
4084 val = cpu_to_be64(val);
4085 cpu_physical_memory_write(addr, &val, 8);
4086}
4087
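/* Illustrative sketch ("ring_addr"/"guest_pa" are hypothetical): the
 * 64-bit stores are bounce writes via cpu_physical_memory_write(), so
 * unlike the aligned fast paths above they carry no alignment warning:
 *
 *     stq_le_phys(ring_addr, guest_pa);
 */
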
aliguori5e2972f2009-03-28 17:51:36 +00004088/* virtual memory access for debug (includes writing to ROM) */
Andreas Färber9349b4f2012-03-14 01:38:32 +01004089int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004090 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004091{
4092 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004093 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004094 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004095
4096 while (len > 0) {
4097 page = addr & TARGET_PAGE_MASK;
4098 phys_addr = cpu_get_phys_page_debug(env, page);
4099 /* if no physical page mapped, return an error */
4100 if (phys_addr == -1)
4101 return -1;
4102 l = (page + TARGET_PAGE_SIZE) - addr;
4103 if (l > len)
4104 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004105 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004106 if (is_write)
4107 cpu_physical_memory_write_rom(phys_addr, buf, l);
4108 else
aliguori5e2972f2009-03-28 17:51:36 +00004109 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004110 len -= l;
4111 buf += l;
4112 addr += l;
4113 }
4114 return 0;
4115}
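
/* Illustrative sketch of a gdbstub-style probe ("env" is the CPU under
 * debug, "pc" a hypothetical guest virtual address); unmapped pages are
 * reported as -1 rather than faulting:
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0) {
 *         return;    // no physical page mapped at pc
 *     }
 */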
Paul Brooka68fe892010-03-01 00:08:59 +00004116#endif
bellard13eb76e2004-01-24 15:23:36 +00004117
pbrook2e70f6e2008-06-29 01:03:05 +00004118/* in deterministic (icount) execution mode, an instruction that does
4119   device I/O must be the last one in its TB */
Blue Swirl20503962012-04-09 14:20:20 +00004120void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
pbrook2e70f6e2008-06-29 01:03:05 +00004121{
4122 TranslationBlock *tb;
4123 uint32_t n, cflags;
4124 target_ulong pc, cs_base;
4125 uint64_t flags;
4126
Blue Swirl20503962012-04-09 14:20:20 +00004127 tb = tb_find_pc(retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004128 if (!tb) {
4129 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
Blue Swirl20503962012-04-09 14:20:20 +00004130 (void *)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004131 }
4132 n = env->icount_decr.u16.low + tb->icount;
Blue Swirl20503962012-04-09 14:20:20 +00004133 cpu_restore_state(tb, env, retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004134 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004135 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004136 n = n - env->icount_decr.u16.low;
4137 /* Generate a new TB ending on the I/O insn. */
4138 n++;
4139 /* On MIPS and SH, delay slot instructions can only be restarted if
4140 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004141 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004142 branch. */
4143#if defined(TARGET_MIPS)
4144 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4145 env->active_tc.PC -= 4;
4146 env->icount_decr.u16.low++;
4147 env->hflags &= ~MIPS_HFLAG_BMASK;
4148 }
4149#elif defined(TARGET_SH4)
4150 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4151 && n > 1) {
4152 env->pc -= 2;
4153 env->icount_decr.u16.low++;
4154 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4155 }
4156#endif
4157 /* This should never happen. */
4158 if (n > CF_COUNT_MASK)
4159 cpu_abort(env, "TB too big during recompile");
4160
4161 cflags = n | CF_LAST_IO;
4162 pc = tb->pc;
4163 cs_base = tb->cs_base;
4164 flags = tb->flags;
4165 tb_phys_invalidate(tb, -1);
4166 /* FIXME: In theory this could raise an exception. In practice
4167 we have already translated the block once so it's probably ok. */
4168 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004169 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004170 the first in the TB) then we end up generating a whole new TB and
4171 repeating the fault, which is horribly inefficient.
4172 Better would be to execute just this insn uncached, or generate a
4173 second new TB. */
4174 cpu_resume_from_signal(env, NULL);
4175}
4176
Paul Brookb3755a92010-03-12 16:54:58 +00004177#if !defined(CONFIG_USER_ONLY)
4178
Stefan Weil055403b2010-10-22 23:03:32 +02004179void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004180{
4181 int i, target_code_size, max_target_code_size;
4182 int direct_jmp_count, direct_jmp2_count, cross_page;
4183 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004184
bellarde3db7222005-01-26 22:00:47 +00004185 target_code_size = 0;
4186 max_target_code_size = 0;
4187 cross_page = 0;
4188 direct_jmp_count = 0;
4189 direct_jmp2_count = 0;
4190 for(i = 0; i < nb_tbs; i++) {
4191 tb = &tbs[i];
4192 target_code_size += tb->size;
4193 if (tb->size > max_target_code_size)
4194 max_target_code_size = tb->size;
4195 if (tb->page_addr[1] != -1)
4196 cross_page++;
4197 if (tb->tb_next_offset[0] != 0xffff) {
4198 direct_jmp_count++;
4199 if (tb->tb_next_offset[1] != 0xffff) {
4200 direct_jmp2_count++;
4201 }
4202 }
4203 }
4204    /* XXX: avoid using doubles? */
bellard57fec1f2008-02-01 10:50:11 +00004205 cpu_fprintf(f, "Translation buffer state:\n");
Richard Hendersonf1bc0bc2012-10-16 17:30:10 +10004206 cpu_fprintf(f, "gen code size %td/%zd\n",
bellard26a5f132008-05-28 12:30:31 +00004207 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4208 cpu_fprintf(f, "TB count %d/%d\n",
4209 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004210 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004211 nb_tbs ? target_code_size / nb_tbs : 0,
4212 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004213 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004214 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4215 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004216 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4217 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004218 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4219 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004220 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004221 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4222 direct_jmp2_count,
4223 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004224 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004225 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4226 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4227 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004228 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004229}
4230
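/* Illustrative call (e.g. from a debugging hook): the stdio fprintf()
 * matches the fprintf_function signature, so the stats can be sent to
 * stderr directly:
 *
 *     dump_exec_info(stderr, fprintf);
 */
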
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00004231/*
4232 * A helper function for the _utterly broken_ virtio device model to
4233 * find out if it's running on a big-endian machine. Don't do this at
4234 * home, kids!
4234 */
4235bool virtio_is_big_endian(void);
4236bool virtio_is_big_endian(void)
4237{
4238#if defined(TARGET_WORDS_BIGENDIAN)
4239 return true;
4240#else
4241 return false;
4242#endif
4243}
4244
bellard61382a52003-10-27 21:22:23 +00004245#endif
Wen Congyang76f35532012-05-07 12:04:18 +08004246
4247#ifndef CONFIG_USER_ONLY
4248bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
4249{
4250 MemoryRegionSection *section;
4251
4252 section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);
4253
4254 return !(memory_region_is_ram(section->mr) ||
4255 memory_region_is_romd(section->mr));
4256}
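
/* Illustrative sketch ("paddr"/"buf" are hypothetical): a guest-memory
 * dumper can use this predicate to skip MMIO ranges, where reads would
 * trigger device side effects:
 *
 *     if (!cpu_physical_memory_is_io(paddr)) {
 *         cpu_physical_memory_read(paddr, buf, TARGET_PAGE_SIZE);
 *     }
 */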
4257#endif