/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t *code_gen_prologue;
static uint8_t *code_gen_buffer;
static size_t code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static size_t code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
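
/* For example, with L1_MAP_ADDR_SPACE_BITS == 32 and TARGET_PAGE_BITS == 12
   (illustrative numbers only), a page index has 20 bits; 20 % L2_BITS == 0,
   which is < 4, so V_L1_BITS == 10 and V_L1_SHIFT == 10: the top ten bits
   of the index select the l1_map entry and the low ten bits select the
   PageDesc within the bottom-level array.  */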

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};
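
/* A node entry packs into 16 bits: when is_leaf is set, ptr indexes
   phys_sections; otherwise it indexes phys_map_nodes, i.e. the next
   level of the tree.  The 15-bit width of ptr is also why
   PHYS_MAP_NODE_NIL below is (((uint16_t)~0) >> 1).  */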

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

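/* Mark a range of host memory as executable: on Win32 via VirtualProtect,
   elsewhere by rounding the range out to host page boundaries and using
   mprotect to add PROT_EXEC alongside read/write.  */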
#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

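/* Recursively fill the physical page tree with 'leaf' for *nb pages
   starting at *index.  A run that is aligned to this level's step and
   spans at least 'step' pages is recorded as a single leaf entry at
   this level; anything smaller recurses one level down.  */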
static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

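/* Look up the section covering a physical page index.  From the
   caller's point of view the walk never fails: a missing subtree, or
   an entry in a node that was never filled in, resolves to the
   phys_section_unassigned section.  */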
MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    code_gen_buffer_size = tb_size;
    return tb_size;
}

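/* Three allocation strategies for the code generation buffer follow:
   a static array (user mode), an anonymous mmap on hosts where we can
   steer its placement near the executable, and a plain g_malloc
   fallback made executable with map_exec.  */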
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (code_gen_buffer_size > 800u * 1024 * 1024) {
        code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(code_gen_buffer_size);
    if (buf) {
        map_exec(buf, code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */

static inline void code_gen_alloc(size_t tb_size)
{
    code_gen_buffer_size = size_code_gen_buffer(tb_size);
    code_gen_buffer = alloc_code_gen_buffer();
    if (code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    code_gen_prologue = code_gen_buffer + code_gen_buffer_size - 1024;
    code_gen_buffer_size -= 1024;

    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

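/* The two low bits of each pointer in a page's TB list encode which of
   the TB's two page slots (page_next[0] or page_next[1]) holds the next
   link, so a single list can thread through TBs that span different
   page pairs; the walk below masks the tag off before comparing.  */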
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

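/* TBs that jump directly into a given TB are kept on a circular list
   rooted at jmp_first, again with the jump slot number encoded in the
   low bits of each pointer.  The tag value 2 marks the list head (the
   TB itself), which is how the walks below detect a full lap.  */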
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

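/* Set bits [start, start + len) in the bitmap 'tab', LSB first within
   each byte.  For example, set_bits(tab, 5, 7) ORs 0xe0 into tab[0]
   (bits 5..7) and 0x0f into tab[1] (bits 8..11).  */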
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

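/* Build a bitmap recording which bytes of a guest page are covered by
   translated code; tb_invalidate_phys_page_fast below consults it so
   that writes which touch no TB on the page can skip the full
   invalidation walk.  */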
1011static void build_page_bitmap(PageDesc *p)
1012{
1013 int n, tb_start, tb_end;
1014 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00001015
Anthony Liguori7267c092011-08-20 22:09:37 -05001016 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
bellard9fa3e852004-01-04 18:06:42 +00001017
1018 tb = p->first_tb;
1019 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001020 n = (uintptr_t)tb & 3;
1021 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellard9fa3e852004-01-04 18:06:42 +00001022 /* NOTE: this is subtle as a TB may span two physical pages */
1023 if (n == 0) {
1024 /* NOTE: tb_end may be after the end of the page, but
1025 it is not a problem */
1026 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1027 tb_end = tb_start + tb->size;
1028 if (tb_end > TARGET_PAGE_SIZE)
1029 tb_end = TARGET_PAGE_SIZE;
1030 } else {
1031 tb_start = 0;
1032 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1033 }
1034 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1035 tb = tb->page_next[n];
1036 }
1037}
1038
Andreas Färber9349b4f2012-03-14 01:38:32 +01001039TranslationBlock *tb_gen_code(CPUArchState *env,
pbrook2e70f6e2008-06-29 01:03:05 +00001040 target_ulong pc, target_ulong cs_base,
1041 int flags, int cflags)
bellardd720b932004-04-25 17:57:43 +00001042{
1043 TranslationBlock *tb;
1044 uint8_t *tc_ptr;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001045 tb_page_addr_t phys_pc, phys_page2;
1046 target_ulong virt_page2;
bellardd720b932004-04-25 17:57:43 +00001047 int code_gen_size;
1048
Paul Brook41c1b1c2010-03-12 16:54:58 +00001049 phys_pc = get_page_addr_code(env, pc);
bellardc27004e2005-01-03 23:35:10 +00001050 tb = tb_alloc(pc);
bellardd720b932004-04-25 17:57:43 +00001051 if (!tb) {
1052 /* flush must be done */
1053 tb_flush(env);
1054 /* cannot fail at this point */
bellardc27004e2005-01-03 23:35:10 +00001055 tb = tb_alloc(pc);
pbrook2e70f6e2008-06-29 01:03:05 +00001056 /* Don't forget to invalidate previous TB info. */
1057 tb_invalidated_flag = 1;
bellardd720b932004-04-25 17:57:43 +00001058 }
1059 tc_ptr = code_gen_ptr;
1060 tb->tc_ptr = tc_ptr;
1061 tb->cs_base = cs_base;
1062 tb->flags = flags;
1063 tb->cflags = cflags;
blueswir1d07bde82007-12-11 19:35:45 +00001064 cpu_gen_code(env, tb, &code_gen_size);
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001065 code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
1066 CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
ths3b46e622007-09-17 08:09:54 +00001067
bellardd720b932004-04-25 17:57:43 +00001068 /* check next page if needed */
bellardc27004e2005-01-03 23:35:10 +00001069 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
bellardd720b932004-04-25 17:57:43 +00001070 phys_page2 = -1;
bellardc27004e2005-01-03 23:35:10 +00001071 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
Paul Brook41c1b1c2010-03-12 16:54:58 +00001072 phys_page2 = get_page_addr_code(env, virt_page2);
bellardd720b932004-04-25 17:57:43 +00001073 }
Paul Brook41c1b1c2010-03-12 16:54:58 +00001074 tb_link_page(tb, phys_pc, phys_page2);
pbrook2e70f6e2008-06-29 01:03:05 +00001075 return tb;
bellardd720b932004-04-25 17:57:43 +00001076}
ths3b46e622007-09-17 08:09:54 +00001077
Alexander Graf77a8f1a2012-05-10 22:40:10 +00001078/*
Jan Kiszka8e0fdce2012-05-23 23:41:53 -03001079 * Invalidate all TBs which intersect with the target physical address range
1080 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1081 * 'is_cpu_write_access' should be true if called from a real cpu write
1082 * access: the virtual CPU will exit the current TB if code is modified inside
1083 * this TB.
Alexander Graf77a8f1a2012-05-10 22:40:10 +00001084 */
1085void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
1086 int is_cpu_write_access)
1087{
1088 while (start < end) {
1089 tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
1090 start &= TARGET_PAGE_MASK;
1091 start += TARGET_PAGE_SIZE;
1092 }
1093}
1094
Jan Kiszka8e0fdce2012-05-23 23:41:53 -03001095/*
1096 * Invalidate all TBs which intersect with the target physical address range
1097 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1098 * 'is_cpu_write_access' should be true if called from a real cpu write
1099 * access: the virtual CPU will exit the current TB if code is modified inside
1100 * this TB.
1101 */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001102void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
bellardd720b932004-04-25 17:57:43 +00001103 int is_cpu_write_access)
bellard9fa3e852004-01-04 18:06:42 +00001104{
aliguori6b917542008-11-18 19:46:41 +00001105 TranslationBlock *tb, *tb_next, *saved_tb;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001106 CPUArchState *env = cpu_single_env;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001107 tb_page_addr_t tb_start, tb_end;
aliguori6b917542008-11-18 19:46:41 +00001108 PageDesc *p;
1109 int n;
1110#ifdef TARGET_HAS_PRECISE_SMC
1111 int current_tb_not_found = is_cpu_write_access;
1112 TranslationBlock *current_tb = NULL;
1113 int current_tb_modified = 0;
1114 target_ulong current_pc = 0;
1115 target_ulong current_cs_base = 0;
1116 int current_flags = 0;
1117#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001118
1119 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001120 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001121 return;
ths5fafdf22007-09-16 21:08:06 +00001122 if (!p->code_bitmap &&
bellardd720b932004-04-25 17:57:43 +00001123 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1124 is_cpu_write_access) {
bellard9fa3e852004-01-04 18:06:42 +00001125 /* build code bitmap */
1126 build_page_bitmap(p);
1127 }
1128
1129 /* we remove all the TBs in the range [start, end[ */
1130 /* XXX: see if in some cases it could be faster to invalidate all the code */
1131 tb = p->first_tb;
1132 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001133 n = (uintptr_t)tb & 3;
1134 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellard9fa3e852004-01-04 18:06:42 +00001135 tb_next = tb->page_next[n];
1136 /* NOTE: this is subtle as a TB may span two physical pages */
1137 if (n == 0) {
1138 /* NOTE: tb_end may be after the end of the page, but
1139 it is not a problem */
1140 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1141 tb_end = tb_start + tb->size;
1142 } else {
1143 tb_start = tb->page_addr[1];
1144 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1145 }
1146 if (!(tb_end <= start || tb_start >= end)) {
bellardd720b932004-04-25 17:57:43 +00001147#ifdef TARGET_HAS_PRECISE_SMC
1148 if (current_tb_not_found) {
1149 current_tb_not_found = 0;
1150 current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001151 if (env->mem_io_pc) {
bellardd720b932004-04-25 17:57:43 +00001152 /* now we have a real cpu fault */
pbrook2e70f6e2008-06-29 01:03:05 +00001153 current_tb = tb_find_pc(env->mem_io_pc);
bellardd720b932004-04-25 17:57:43 +00001154 }
1155 }
1156 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001157 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001158 /* If we are modifying the current TB, we must stop
1159 its execution. We could be more precise by checking
1160 that the modification is after the current PC, but it
1161 would require a specialized function to partially
1162 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001163
bellardd720b932004-04-25 17:57:43 +00001164 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001165 cpu_restore_state(current_tb, env, env->mem_io_pc);
aliguori6b917542008-11-18 19:46:41 +00001166 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1167 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001168 }
1169#endif /* TARGET_HAS_PRECISE_SMC */
bellard6f5a9f72005-11-26 20:12:28 +00001170 /* we need to do that to handle the case where a signal
1171 occurs while doing tb_phys_invalidate() */
1172 saved_tb = NULL;
1173 if (env) {
1174 saved_tb = env->current_tb;
1175 env->current_tb = NULL;
1176 }
bellard9fa3e852004-01-04 18:06:42 +00001177 tb_phys_invalidate(tb, -1);
bellard6f5a9f72005-11-26 20:12:28 +00001178 if (env) {
1179 env->current_tb = saved_tb;
1180 if (env->interrupt_request && env->current_tb)
1181 cpu_interrupt(env, env->interrupt_request);
1182 }
bellard9fa3e852004-01-04 18:06:42 +00001183 }
1184 tb = tb_next;
1185 }
1186#if !defined(CONFIG_USER_ONLY)
1187 /* if no code remaining, no need to continue to use slow writes */
1188 if (!p->first_tb) {
1189 invalidate_page_bitmap(p);
bellardd720b932004-04-25 17:57:43 +00001190 if (is_cpu_write_access) {
pbrook2e70f6e2008-06-29 01:03:05 +00001191 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
bellardd720b932004-04-25 17:57:43 +00001192 }
1193 }
1194#endif
1195#ifdef TARGET_HAS_PRECISE_SMC
1196 if (current_tb_modified) {
1197 /* we generate a block containing just the instruction
1198 modifying the memory. It will ensure that it cannot modify
1199 itself */
bellardea1c1802004-06-14 18:56:36 +00001200 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001201 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001202 cpu_resume_from_signal(env, NULL);
bellard9fa3e852004-01-04 18:06:42 +00001203 }
1204#endif
1205}
1206
1207/* len must be <= 8 and start must be a multiple of len */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001208static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
bellard9fa3e852004-01-04 18:06:42 +00001209{
1210 PageDesc *p;
1211 int offset, b;
bellard59817cc2004-02-16 22:01:13 +00001212#if 0
bellarda4193c82004-06-03 14:01:43 +00001213 if (1) {
aliguori93fcfe32009-01-15 22:34:14 +00001214 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1215 cpu_single_env->mem_io_vaddr, len,
1216 cpu_single_env->eip,
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001217 cpu_single_env->eip +
1218 (intptr_t)cpu_single_env->segs[R_CS].base);
bellard59817cc2004-02-16 22:01:13 +00001219 }
1220#endif
bellard9fa3e852004-01-04 18:06:42 +00001221 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001222 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001223 return;
1224 if (p->code_bitmap) {
1225 offset = start & ~TARGET_PAGE_MASK;
1226 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1227 if (b & ((1 << len) - 1))
1228 goto do_invalidate;
1229 } else {
1230 do_invalidate:
bellardd720b932004-04-25 17:57:43 +00001231 tb_invalidate_phys_page_range(start, start + len, 1);
bellard9fa3e852004-01-04 18:06:42 +00001232 }
1233}
1234
bellard9fa3e852004-01-04 18:06:42 +00001235#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001236static void tb_invalidate_phys_page(tb_page_addr_t addr,
Blue Swirl20503962012-04-09 14:20:20 +00001237 uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001238{
aliguori6b917542008-11-18 19:46:41 +00001239 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001240 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001241 int n;
bellardd720b932004-04-25 17:57:43 +00001242#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001243 TranslationBlock *current_tb = NULL;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001244 CPUArchState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001245 int current_tb_modified = 0;
1246 target_ulong current_pc = 0;
1247 target_ulong current_cs_base = 0;
1248 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001249#endif
bellard9fa3e852004-01-04 18:06:42 +00001250
1251 addr &= TARGET_PAGE_MASK;
1252 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001253 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001254 return;
1255 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001256#ifdef TARGET_HAS_PRECISE_SMC
1257 if (tb && pc != 0) {
1258 current_tb = tb_find_pc(pc);
1259 }
1260#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001261 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001262 n = (uintptr_t)tb & 3;
1263 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001264#ifdef TARGET_HAS_PRECISE_SMC
1265 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001266 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001267 /* If we are modifying the current TB, we must stop
1268 its execution. We could be more precise by checking
1269 that the modification is after the current PC, but it
1270 would require a specialized function to partially
1271 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001272
bellardd720b932004-04-25 17:57:43 +00001273 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001274 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001275 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1276 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001277 }
1278#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001279 tb_phys_invalidate(tb, addr);
1280 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001281 }
1282 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001283#ifdef TARGET_HAS_PRECISE_SMC
1284 if (current_tb_modified) {
1285 /* we generate a block containing just the instruction
1286 modifying the memory; this ensures that the block cannot
1287 modify itself */
bellardea1c1802004-06-14 18:56:36 +00001288 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001289 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001290 cpu_resume_from_signal(env, puc);
1291 }
1292#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001293}
bellard9fa3e852004-01-04 18:06:42 +00001294#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001295
1296/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001297static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001298 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001299{
1300 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001301#ifndef CONFIG_USER_ONLY
1302 bool page_already_protected;
1303#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001304
bellard9fa3e852004-01-04 18:06:42 +00001305 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001306 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001307 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001308#ifndef CONFIG_USER_ONLY
1309 page_already_protected = p->first_tb != NULL;
1310#endif
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001311 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
bellard9fa3e852004-01-04 18:06:42 +00001312 invalidate_page_bitmap(p);
1313
bellard107db442004-06-22 18:48:46 +00001314#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001315
bellard9fa3e852004-01-04 18:06:42 +00001316#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001317 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001318 target_ulong addr;
1319 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001320 int prot;
1321
bellardfd6ce8f2003-05-14 19:00:11 +00001322 /* force the host page to be non-writable (writes will incur a
1323 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001324 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001325 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001326 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1327 addr += TARGET_PAGE_SIZE) {
1328
1329 p2 = page_find (addr >> TARGET_PAGE_BITS);
1330 if (!p2)
1331 continue;
1332 prot |= p2->flags;
1333 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001334 }
ths5fafdf22007-09-16 21:08:06 +00001335 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001336 (prot & PAGE_BITS) & ~PAGE_WRITE);
1337#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001338 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001339 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001340#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001341 }
bellard9fa3e852004-01-04 18:06:42 +00001342#else
1343 /* if some code is already present, then the pages are already
1344 protected. So we handle the case where only the first TB is
1345 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001346 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001347 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001348 }
1349#endif
bellardd720b932004-04-25 17:57:43 +00001350
1351#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001352}
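/* Note on the CONFIG_USER_ONLY branch above: every target page sharing
   the host page is write-protected by a single mprotect() call (e.g. a
   4K target page inside a 64K host page write-protects all 64K).  A
   later guest write faults, and page_unprotect() below restores
   PAGE_WRITE after invalidating the overlapping TBs. */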
1353
bellard9fa3e852004-01-04 18:06:42 +00001354/* add a new TB and link it to the physical page tables. phys_page2 is
1355 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001356void tb_link_page(TranslationBlock *tb,
1357 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001358{
bellard9fa3e852004-01-04 18:06:42 +00001359 unsigned int h;
1360 TranslationBlock **ptb;
1361
pbrookc8a706f2008-06-02 16:16:42 +00001362 /* Grab the mmap lock to stop another thread invalidating this TB
1363 before we are done. */
1364 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001365 /* add in the physical hash table */
1366 h = tb_phys_hash_func(phys_pc);
1367 ptb = &tb_phys_hash[h];
1368 tb->phys_hash_next = *ptb;
1369 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001370
1371 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001372 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1373 if (phys_page2 != -1)
1374 tb_alloc_page(tb, 1, phys_page2);
1375 else
1376 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001377
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001378 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
bellardd4e81642003-05-25 16:46:15 +00001379 tb->jmp_next[0] = NULL;
1380 tb->jmp_next[1] = NULL;
1381
1382 /* init original jump addresses */
1383 if (tb->tb_next_offset[0] != 0xffff)
1384 tb_reset_jump(tb, 0);
1385 if (tb->tb_next_offset[1] != 0xffff)
1386 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001387
1388#ifdef DEBUG_TB_CHECK
1389 tb_page_check();
1390#endif
pbrookc8a706f2008-06-02 16:16:42 +00001391 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001392}
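/* A minimal sketch (not part of the original file) of how the tagged
   pointers built above decode: the low two bits of a TranslationBlock
   pointer in the page and jump lists carry the page index n, and the
   value 2 stored in jmp_first marks the head of the circular jump list. */
#if 0
static inline TranslationBlock *tb_untag(TranslationBlock *tagged,
                                         unsigned int *n)
{
    *n = (uintptr_t)tagged & 3;                          /* page index or 2 */
    return (TranslationBlock *)((uintptr_t)tagged & ~3); /* real pointer */
}
#endif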
1393
bellarda513fe12003-05-27 23:29:48 +00001394/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1395 tb[1].tc_ptr. Return NULL if not found */
Stefan Weil6375e092012-04-06 22:26:15 +02001396TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
bellarda513fe12003-05-27 23:29:48 +00001397{
1398 int m_min, m_max, m;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001399 uintptr_t v;
bellarda513fe12003-05-27 23:29:48 +00001400 TranslationBlock *tb;
1401
1402 if (nb_tbs <= 0)
1403 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001404 if (tc_ptr < (uintptr_t)code_gen_buffer ||
1405 tc_ptr >= (uintptr_t)code_gen_ptr) {
bellarda513fe12003-05-27 23:29:48 +00001406 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001407 }
bellarda513fe12003-05-27 23:29:48 +00001408 /* binary search (cf Knuth) */
1409 m_min = 0;
1410 m_max = nb_tbs - 1;
1411 while (m_min <= m_max) {
1412 m = (m_min + m_max) >> 1;
1413 tb = &tbs[m];
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001414 v = (uintptr_t)tb->tc_ptr;
bellarda513fe12003-05-27 23:29:48 +00001415 if (v == tc_ptr)
1416 return tb;
1417 else if (tc_ptr < v) {
1418 m_max = m - 1;
1419 } else {
1420 m_min = m + 1;
1421 }
ths5fafdf22007-09-16 21:08:06 +00001422 }
bellarda513fe12003-05-27 23:29:48 +00001423 return &tbs[m_max];
1424}
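/* Usage sketch (hypothetical caller, e.g. a SEGV handler): map a host PC
   back to the TB that generated it and resynchronize guest state, as the
   SMC paths above do. */
#if 0
TranslationBlock *tb = tb_find_pc(host_pc);
if (tb != NULL) {
    cpu_restore_state(tb, env, host_pc);
}
#endif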
bellard75012672003-06-21 13:11:07 +00001425
bellardea041c02003-06-25 16:16:50 +00001426static void tb_reset_jump_recursive(TranslationBlock *tb);
1427
1428static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1429{
1430 TranslationBlock *tb1, *tb_next, **ptb;
1431 unsigned int n1;
1432
1433 tb1 = tb->jmp_next[n];
1434 if (tb1 != NULL) {
1435 /* find head of list */
1436 for(;;) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001437 n1 = (uintptr_t)tb1 & 3;
1438 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001439 if (n1 == 2)
1440 break;
1441 tb1 = tb1->jmp_next[n1];
1442 }
1443 /* we are now sure that tb jumps to tb1 */
1444 tb_next = tb1;
1445
1446 /* remove tb from the jmp_first list */
1447 ptb = &tb_next->jmp_first;
1448 for(;;) {
1449 tb1 = *ptb;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001450 n1 = (uintptr_t)tb1 & 3;
1451 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001452 if (n1 == n && tb1 == tb)
1453 break;
1454 ptb = &tb1->jmp_next[n1];
1455 }
1456 *ptb = tb->jmp_next[n];
1457 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001458
bellardea041c02003-06-25 16:16:50 +00001459 /* suppress the jump to next tb in generated code */
1460 tb_reset_jump(tb, n);
1461
bellard01243112004-01-04 15:48:17 +00001462 /* suppress jumps in the tb on which we could have jumped */
bellardea041c02003-06-25 16:16:50 +00001463 tb_reset_jump_recursive(tb_next);
1464 }
1465}
1466
1467static void tb_reset_jump_recursive(TranslationBlock *tb)
1468{
1469 tb_reset_jump_recursive2(tb, 0);
1470 tb_reset_jump_recursive2(tb, 1);
1471}
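/* Note on the walk above: jmp_next chains every TB that jumps into a
   given TB; the list is circular and terminates at the entry whose low
   bits are 2, which tb_link_page() stored in jmp_first to point back at
   the target TB itself. */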
1472
bellard1fddef42005-04-17 19:16:13 +00001473#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001474#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001475static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
Paul Brook94df27f2010-02-28 23:47:45 +00001476{
1477 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1478}
1479#else
Max Filippov1e7855a2012-04-10 02:48:17 +04001480void tb_invalidate_phys_addr(target_phys_addr_t addr)
bellardd720b932004-04-25 17:57:43 +00001481{
Anthony Liguoric227f092009-10-01 16:12:16 -05001482 ram_addr_t ram_addr;
Avi Kivityf3705d52012-03-08 16:16:34 +02001483 MemoryRegionSection *section;
bellardd720b932004-04-25 17:57:43 +00001484
Avi Kivity06ef3522012-02-13 16:11:22 +02001485 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf3705d52012-03-08 16:16:34 +02001486 if (!(memory_region_is_ram(section->mr)
1487 || (section->mr->rom_device && section->mr->readable))) {
Avi Kivity06ef3522012-02-13 16:11:22 +02001488 return;
1489 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001490 ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001491 + memory_region_section_addr(section, addr);
pbrook706cd4b2006-04-08 17:36:21 +00001492 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001493}
Max Filippov1e7855a2012-04-10 02:48:17 +04001494
1495static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
1496{
Max Filippov9d70c4b2012-05-27 20:21:08 +04001497 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
1498 (pc & ~TARGET_PAGE_MASK));
Max Filippov1e7855a2012-04-10 02:48:17 +04001499}
bellardc27004e2005-01-03 23:35:10 +00001500#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001501#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001502
Paul Brookc527ee82010-03-01 03:31:14 +00001503#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001504void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
Paul Brookc527ee82010-03-01 03:31:14 +00001505
1506{
1507}
1508
Andreas Färber9349b4f2012-03-14 01:38:32 +01001509int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
Paul Brookc527ee82010-03-01 03:31:14 +00001510 int flags, CPUWatchpoint **watchpoint)
1511{
1512 return -ENOSYS;
1513}
1514#else
pbrook6658ffb2007-03-16 23:58:11 +00001515/* Add a watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001516int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001517 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001518{
aliguorib4051332008-11-18 20:14:20 +00001519 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001520 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001521
aliguorib4051332008-11-18 20:14:20 +00001522 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
Max Filippov0dc23822012-01-29 03:15:23 +04001523 if ((len & (len - 1)) || (addr & ~len_mask) ||
1524 len == 0 || len > TARGET_PAGE_SIZE) {
aliguorib4051332008-11-18 20:14:20 +00001525 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1526 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1527 return -EINVAL;
1528 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001529 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001530
aliguoria1d1bb32008-11-18 20:07:32 +00001531 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001532 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001533 wp->flags = flags;
1534
aliguori2dc9f412008-11-18 20:56:59 +00001535 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001536 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001537 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001538 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001539 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001540
pbrook6658ffb2007-03-16 23:58:11 +00001541 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001542
1543 if (watchpoint)
1544 *watchpoint = wp;
1545 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001546}
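/* Usage sketch (hypothetical address): insert a 4-byte GDB write
   watchpoint; len must be a power of two and addr aligned to it, as the
   sanity check above enforces. */
#if 0
CPUWatchpoint *wp;
if (cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE | BP_GDB, &wp) < 0) {
    /* rejected: invalid length or alignment */
}
#endif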
1547
aliguoria1d1bb32008-11-18 20:07:32 +00001548/* Remove a specific watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001549int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001550 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001551{
aliguorib4051332008-11-18 20:14:20 +00001552 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001553 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001554
Blue Swirl72cf2d42009-09-12 07:36:22 +00001555 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001556 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001557 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001558 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001559 return 0;
1560 }
1561 }
aliguoria1d1bb32008-11-18 20:07:32 +00001562 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001563}
1564
aliguoria1d1bb32008-11-18 20:07:32 +00001565/* Remove a specific watchpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001566void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
aliguoria1d1bb32008-11-18 20:07:32 +00001567{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001568 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001569
aliguoria1d1bb32008-11-18 20:07:32 +00001570 tlb_flush_page(env, watchpoint->vaddr);
1571
Anthony Liguori7267c092011-08-20 22:09:37 -05001572 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001573}
1574
aliguoria1d1bb32008-11-18 20:07:32 +00001575/* Remove all matching watchpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001576void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001577{
aliguoric0ce9982008-11-25 22:13:57 +00001578 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001579
Blue Swirl72cf2d42009-09-12 07:36:22 +00001580 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001581 if (wp->flags & mask)
1582 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001583 }
aliguoria1d1bb32008-11-18 20:07:32 +00001584}
Paul Brookc527ee82010-03-01 03:31:14 +00001585#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001586
1587/* Add a breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001588int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +00001589 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001590{
bellard1fddef42005-04-17 19:16:13 +00001591#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001592 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001593
Anthony Liguori7267c092011-08-20 22:09:37 -05001594 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001595
1596 bp->pc = pc;
1597 bp->flags = flags;
1598
aliguori2dc9f412008-11-18 20:56:59 +00001599 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001600 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001601 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001602 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001603 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001604
1605 breakpoint_invalidate(env, pc);
1606
1607 if (breakpoint)
1608 *breakpoint = bp;
1609 return 0;
1610#else
1611 return -ENOSYS;
1612#endif
1613}
1614
1615/* Remove a specific breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001616int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +00001617{
1618#if defined(TARGET_HAS_ICE)
1619 CPUBreakpoint *bp;
1620
Blue Swirl72cf2d42009-09-12 07:36:22 +00001621 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001622 if (bp->pc == pc && bp->flags == flags) {
1623 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001624 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001625 }
bellard4c3a88a2003-07-26 12:06:08 +00001626 }
aliguoria1d1bb32008-11-18 20:07:32 +00001627 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001628#else
aliguoria1d1bb32008-11-18 20:07:32 +00001629 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001630#endif
1631}
1632
aliguoria1d1bb32008-11-18 20:07:32 +00001633/* Remove a specific breakpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001634void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001635{
bellard1fddef42005-04-17 19:16:13 +00001636#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001637 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001638
aliguoria1d1bb32008-11-18 20:07:32 +00001639 breakpoint_invalidate(env, breakpoint->pc);
1640
Anthony Liguori7267c092011-08-20 22:09:37 -05001641 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001642#endif
1643}
1644
1645/* Remove all matching breakpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001646void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001647{
1648#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001649 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001650
Blue Swirl72cf2d42009-09-12 07:36:22 +00001651 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001652 if (bp->flags & mask)
1653 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001654 }
bellard4c3a88a2003-07-26 12:06:08 +00001655#endif
1656}
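/* Usage sketch: the mask selects what is removed, so passing BP_GDB
   drops only the GDB-injected break/watchpoints and leaves any BP_CPU
   ones installed by the target itself. */
#if 0
cpu_breakpoint_remove_all(env, BP_GDB);
cpu_watchpoint_remove_all(env, BP_GDB);
#endif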
1657
bellardc33a3462003-07-29 20:50:33 +00001658/* enable or disable single step mode. EXCP_DEBUG is returned by the
1659 CPU loop after each instruction */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001660void cpu_single_step(CPUArchState *env, int enabled)
bellardc33a3462003-07-29 20:50:33 +00001661{
bellard1fddef42005-04-17 19:16:13 +00001662#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001663 if (env->singlestep_enabled != enabled) {
1664 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001665 if (kvm_enabled())
1666 kvm_update_guest_debug(env, 0);
1667 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001668 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001669 /* XXX: only flush what is necessary */
1670 tb_flush(env);
1671 }
bellardc33a3462003-07-29 20:50:33 +00001672 }
1673#endif
1674}
1675
Andreas Färber9349b4f2012-03-14 01:38:32 +01001676static void cpu_unlink_tb(CPUArchState *env)
bellardea041c02003-06-25 16:16:50 +00001677{
pbrookd5975362008-06-07 20:50:51 +00001678 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1679 problem and hope the cpu will stop of its own accord. For userspace
1680 emulation this often isn't actually as bad as it sounds. Often
1681 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001682 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001683 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001684
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001685 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001686 tb = env->current_tb;
1687 /* if the cpu is currently executing code, we must unlink it and
1688 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001689 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001690 env->current_tb = NULL;
1691 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001692 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001693 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001694}
1695
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001696#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001697/* mask must never be zero, except for A20 change call */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001698static void tcg_handle_interrupt(CPUArchState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001699{
1700 int old_mask;
1701
1702 old_mask = env->interrupt_request;
1703 env->interrupt_request |= mask;
1704
aliguori8edac962009-04-24 18:03:45 +00001705 /*
1706 * If called from iothread context, wake the target cpu in
1707 * case its halted.
1708 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001709 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001710 qemu_cpu_kick(env);
1711 return;
1712 }
aliguori8edac962009-04-24 18:03:45 +00001713
pbrook2e70f6e2008-06-29 01:03:05 +00001714 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001715 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001716 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001717 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001718 cpu_abort(env, "Raised interrupt while not in I/O function");
1719 }
pbrook2e70f6e2008-06-29 01:03:05 +00001720 } else {
aurel323098dba2009-03-07 21:28:24 +00001721 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001722 }
1723}
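/* Note on the icount branch above: forcing icount_decr.u16.high to 0xffff
   makes the instruction-count check fail at the next TB boundary, so the
   CPU falls out of generated code on its own; the non-icount path instead
   breaks the TB chain explicitly via cpu_unlink_tb(). */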
1724
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001725CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1726
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001727#else /* CONFIG_USER_ONLY */
1728
Andreas Färber9349b4f2012-03-14 01:38:32 +01001729void cpu_interrupt(CPUArchState *env, int mask)
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001730{
1731 env->interrupt_request |= mask;
1732 cpu_unlink_tb(env);
1733}
1734#endif /* CONFIG_USER_ONLY */
1735
Andreas Färber9349b4f2012-03-14 01:38:32 +01001736void cpu_reset_interrupt(CPUArchState *env, int mask)
bellardb54ad042004-05-20 13:42:52 +00001737{
1738 env->interrupt_request &= ~mask;
1739}
1740
Andreas Färber9349b4f2012-03-14 01:38:32 +01001741void cpu_exit(CPUArchState *env)
aurel323098dba2009-03-07 21:28:24 +00001742{
1743 env->exit_request = 1;
1744 cpu_unlink_tb(env);
1745}
1746
Andreas Färber9349b4f2012-03-14 01:38:32 +01001747void cpu_abort(CPUArchState *env, const char *fmt, ...)
bellard75012672003-06-21 13:11:07 +00001748{
1749 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001750 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001751
1752 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001753 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001754 fprintf(stderr, "qemu: fatal: ");
1755 vfprintf(stderr, fmt, ap);
1756 fprintf(stderr, "\n");
Peter Maydell6fd2a022012-10-05 15:04:43 +01001757 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori93fcfe32009-01-15 22:34:14 +00001758 if (qemu_log_enabled()) {
1759 qemu_log("qemu: fatal: ");
1760 qemu_log_vprintf(fmt, ap2);
1761 qemu_log("\n");
Peter Maydell6fd2a022012-10-05 15:04:43 +01001762 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori31b1a7b2009-01-15 22:35:09 +00001763 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001764 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001765 }
pbrook493ae1f2007-11-23 16:53:59 +00001766 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001767 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001768#if defined(CONFIG_USER_ONLY)
1769 {
1770 struct sigaction act;
1771 sigfillset(&act.sa_mask);
1772 act.sa_handler = SIG_DFL;
1773 sigaction(SIGABRT, &act, NULL);
1774 }
1775#endif
bellard75012672003-06-21 13:11:07 +00001776 abort();
1777}
1778
Andreas Färber9349b4f2012-03-14 01:38:32 +01001779CPUArchState *cpu_copy(CPUArchState *env)
thsc5be9f02007-02-28 20:20:53 +00001780{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001781 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1782 CPUArchState *next_cpu = new_env->next_cpu;
thsc5be9f02007-02-28 20:20:53 +00001783 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001784#if defined(TARGET_HAS_ICE)
1785 CPUBreakpoint *bp;
1786 CPUWatchpoint *wp;
1787#endif
1788
Andreas Färber9349b4f2012-03-14 01:38:32 +01001789 memcpy(new_env, env, sizeof(CPUArchState));
aliguori5a38f082009-01-15 20:16:51 +00001790
1791 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001792 new_env->next_cpu = next_cpu;
1793 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001794
1795 /* Clone all break/watchpoints.
1796 Note: Once we support ptrace with hw-debug register access, make sure
1797 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001798 QTAILQ_INIT(&env->breakpoints);
1799 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001800#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001801 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001802 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1803 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001804 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001805 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1806 wp->flags, NULL);
1807 }
1808#endif
1809
thsc5be9f02007-02-28 20:20:53 +00001810 return new_env;
1811}
1812
bellard01243112004-01-04 15:48:17 +00001813#if !defined(CONFIG_USER_ONLY)
Blue Swirl0cac1b62012-04-09 16:50:52 +00001814void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
edgar_igl5c751e92008-05-06 08:44:21 +00001815{
1816 unsigned int i;
1817
1818 /* Discard jump cache entries for any tb which might potentially
1819 overlap the flushed page. */
1820 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1821 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001822 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001823
1824 i = tb_jmp_cache_hash_page(addr);
1825 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001826 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001827}
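/* Note on the two memsets above: a TB can start on the page preceding
   'addr' and spill into the flushed page, so the jump-cache buckets for
   both addr - TARGET_PAGE_SIZE and addr itself must be cleared. */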
1828
Juan Quintelad24981d2012-05-22 00:42:40 +02001829static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
1830 uintptr_t length)
bellard1ccde1c2004-02-06 19:46:14 +00001831{
Juan Quintelad24981d2012-05-22 00:42:40 +02001832 uintptr_t start1;
bellardf23db162005-08-21 19:12:28 +00001833
bellard1ccde1c2004-02-06 19:46:14 +00001834 /* we modify the TLB cache so that the dirty bit will be set again
1835 when accessing the range */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001836 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02001837 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00001838 address comparisons below. */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001839 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00001840 != (end - 1) - start) {
1841 abort();
1842 }
Blue Swirle5548612012-04-21 13:08:33 +00001843 cpu_tlb_reset_dirty_all(start1, length);
Juan Quintelad24981d2012-05-22 00:42:40 +02001844
1845}
1846
1847/* Note: start and end must be within the same ram block. */
1848void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1849 int dirty_flags)
1850{
1851 uintptr_t length;
1852
1853 start &= TARGET_PAGE_MASK;
1854 end = TARGET_PAGE_ALIGN(end);
1855
1856 length = end - start;
1857 if (length == 0)
1858 return;
1859 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1860
1861 if (tcg_enabled()) {
1862 tlb_reset_dirty_range_all(start, end, length);
1863 }
bellard1ccde1c2004-02-06 19:46:14 +00001864}
1865
aliguori74576192008-10-06 14:02:03 +00001866int cpu_physical_memory_set_dirty_tracking(int enable)
1867{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001868 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00001869 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001870 return ret;
aliguori74576192008-10-06 14:02:03 +00001871}
1872
Blue Swirle5548612012-04-21 13:08:33 +00001873target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
1874 MemoryRegionSection *section,
1875 target_ulong vaddr,
1876 target_phys_addr_t paddr,
1877 int prot,
1878 target_ulong *address)
1879{
1880 target_phys_addr_t iotlb;
1881 CPUWatchpoint *wp;
1882
Blue Swirlcc5bea62012-04-14 14:56:48 +00001883 if (memory_region_is_ram(section->mr)) {
Blue Swirle5548612012-04-21 13:08:33 +00001884 /* Normal RAM. */
1885 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001886 + memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001887 if (!section->readonly) {
1888 iotlb |= phys_section_notdirty;
1889 } else {
1890 iotlb |= phys_section_rom;
1891 }
1892 } else {
1893 /* IO handlers are currently passed a physical address.
1894 It would be nice to pass an offset from the base address
1895 of that region. This would avoid having to special case RAM,
1896 and avoid full address decoding in every device.
1897 We can't use the high bits of pd for this because
1898 IO_MEM_ROMD uses these as a ram address. */
1899 iotlb = section - phys_sections;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001900 iotlb += memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001901 }
1902
1903 /* Make accesses to pages with watchpoints go via the
1904 watchpoint trap routines. */
1905 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1906 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1907 /* Avoid trapping reads of pages with a write breakpoint. */
1908 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1909 iotlb = phys_section_watch + paddr;
1910 *address |= TLB_MMIO;
1911 break;
1912 }
1913 }
1914 }
1915
1916 return iotlb;
1917}
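/* Summary of the encoding above: for RAM the iotlb carries the page's ram
   address with the notdirty/rom section index in the low bits; for MMIO it
   carries an index into phys_sections plus the offset within the region;
   and pages with a matching watchpoint are routed through
   phys_section_watch so every access traps. */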
1918
bellard01243112004-01-04 15:48:17 +00001919#else
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03001920/*
1921 * Walks guest process memory "regions" one by one
1922 * and calls callback function 'fn' for each region.
1923 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001924
1925struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00001926{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001927 walk_memory_regions_fn fn;
1928 void *priv;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001929 uintptr_t start;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001930 int prot;
1931};
bellard9fa3e852004-01-04 18:06:42 +00001932
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001933static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00001934 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001935{
1936 if (data->start != -1ul) {
1937 int rc = data->fn(data->priv, data->start, end, data->prot);
1938 if (rc != 0) {
1939 return rc;
bellard9fa3e852004-01-04 18:06:42 +00001940 }
bellard33417e72003-08-10 21:47:01 +00001941 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001942
1943 data->start = (new_prot ? end : -1ul);
1944 data->prot = new_prot;
1945
1946 return 0;
1947}
1948
1949static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00001950 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001951{
Paul Brookb480d9b2010-03-12 23:23:29 +00001952 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001953 int i, rc;
1954
1955 if (*lp == NULL) {
1956 return walk_memory_regions_end(data, base, 0);
1957 }
1958
1959 if (level == 0) {
1960 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001961 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001962 int prot = pd[i].flags;
1963
1964 pa = base | (i << TARGET_PAGE_BITS);
1965 if (prot != data->prot) {
1966 rc = walk_memory_regions_end(data, pa, prot);
1967 if (rc != 0) {
1968 return rc;
1969 }
1970 }
1971 }
1972 } else {
1973 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001974 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00001975 pa = base | ((abi_ulong)i <<
1976 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001977 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1978 if (rc != 0) {
1979 return rc;
1980 }
1981 }
1982 }
1983
1984 return 0;
1985}
1986
1987int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1988{
1989 struct walk_memory_regions_data data;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001990 uintptr_t i;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001991
1992 data.fn = fn;
1993 data.priv = priv;
1994 data.start = -1ul;
1995 data.prot = 0;
1996
1997 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00001998 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001999 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2000 if (rc != 0) {
2001 return rc;
2002 }
2003 }
2004
2005 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002006}
2007
Paul Brookb480d9b2010-03-12 23:23:29 +00002008static int dump_region(void *priv, abi_ulong start,
2009 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002010{
2011 FILE *f = (FILE *)priv;
2012
Paul Brookb480d9b2010-03-12 23:23:29 +00002013 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2014 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002015 start, end, end - start,
2016 ((prot & PAGE_READ) ? 'r' : '-'),
2017 ((prot & PAGE_WRITE) ? 'w' : '-'),
2018 ((prot & PAGE_EXEC) ? 'x' : '-'));
2019
2020 return (0);
2021}
2022
2023/* dump memory mappings */
2024void page_dump(FILE *f)
2025{
2026 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2027 "start", "end", "size", "prot");
2028 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002029}
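/* Usage sketch: print the current guest mappings (user-mode only). */
#if 0
page_dump(stderr);
#endif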
2030
pbrook53a59602006-03-25 19:31:22 +00002031int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002032{
bellard9fa3e852004-01-04 18:06:42 +00002033 PageDesc *p;
2034
2035 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002036 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002037 return 0;
2038 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002039}
2040
Richard Henderson376a7902010-03-10 15:57:04 -08002041/* Modify the flags of a page and invalidate the code if necessary.
2042 The flag PAGE_WRITE_ORG is positioned automatically depending
2043 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002044void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002045{
Richard Henderson376a7902010-03-10 15:57:04 -08002046 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002047
Richard Henderson376a7902010-03-10 15:57:04 -08002048 /* This function should never be called with addresses outside the
2049 guest address space. If this assert fires, it probably indicates
2050 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002051#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2052 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002053#endif
2054 assert(start < end);
2055
bellard9fa3e852004-01-04 18:06:42 +00002056 start = start & TARGET_PAGE_MASK;
2057 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002058
2059 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002060 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002061 }
2062
2063 for (addr = start, len = end - start;
2064 len != 0;
2065 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2066 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2067
2068 /* If the write protection bit is set, then we invalidate
2069 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002070 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002071 (flags & PAGE_WRITE) &&
2072 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002073 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002074 }
2075 p->flags = flags;
2076 }
bellard9fa3e852004-01-04 18:06:42 +00002077}
2078
ths3d97b402007-11-02 19:02:07 +00002079int page_check_range(target_ulong start, target_ulong len, int flags)
2080{
2081 PageDesc *p;
2082 target_ulong end;
2083 target_ulong addr;
2084
Richard Henderson376a7902010-03-10 15:57:04 -08002085 /* This function should never be called with addresses outside the
2086 guest address space. If this assert fires, it probably indicates
2087 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002088#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2089 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002090#endif
2091
Richard Henderson3e0650a2010-03-29 10:54:42 -07002092 if (len == 0) {
2093 return 0;
2094 }
Richard Henderson376a7902010-03-10 15:57:04 -08002095 if (start + len - 1 < start) {
2096 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002097 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002098 }
balrog55f280c2008-10-28 10:24:11 +00002099
ths3d97b402007-11-02 19:02:07 +00002100 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2101 start = start & TARGET_PAGE_MASK;
2102
Richard Henderson376a7902010-03-10 15:57:04 -08002103 for (addr = start, len = end - start;
2104 len != 0;
2105 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002106 p = page_find(addr >> TARGET_PAGE_BITS);
2107 if (!p)
2108 return -1;
2109 if (!(p->flags & PAGE_VALID))
2110 return -1;
2111
bellarddae32702007-11-14 10:51:00 +00002112 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002113 return -1;
bellarddae32702007-11-14 10:51:00 +00002114 if (flags & PAGE_WRITE) {
2115 if (!(p->flags & PAGE_WRITE_ORG))
2116 return -1;
2117 /* unprotect the page if it was put read-only because it
2118 contains translated code */
2119 if (!(p->flags & PAGE_WRITE)) {
2120 if (!page_unprotect(addr, 0, NULL))
2121 return -1;
2122 }
2123 return 0;
2124 }
ths3d97b402007-11-02 19:02:07 +00002125 }
2126 return 0;
2127}
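/* Usage sketch (hypothetical syscall helper): verify that a guest buffer
   is readable before copying from it; 0 means every page in the range is
   valid for the requested access. */
#if 0
if (page_check_range(guest_addr, len, PAGE_READ) != 0) {
    return -EFAULT;   /* hypothetical error path */
}
#endif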
2128
bellard9fa3e852004-01-04 18:06:42 +00002129/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002130 page. Return TRUE if the fault was successfully handled. */
Stefan Weil6375e092012-04-06 22:26:15 +02002131int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002132{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002133 unsigned int prot;
2134 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002135 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002136
pbrookc8a706f2008-06-02 16:16:42 +00002137 /* Technically this isn't safe inside a signal handler. However we
2138 know this only ever happens in a synchronous SEGV handler, so in
2139 practice it seems to be ok. */
2140 mmap_lock();
2141
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002142 p = page_find(address >> TARGET_PAGE_BITS);
2143 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002144 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002145 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002146 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002147
bellard9fa3e852004-01-04 18:06:42 +00002148 /* if the page was really writable, then we change its
2149 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002150 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2151 host_start = address & qemu_host_page_mask;
2152 host_end = host_start + qemu_host_page_size;
2153
2154 prot = 0;
2155 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2156 p = page_find(addr >> TARGET_PAGE_BITS);
2157 p->flags |= PAGE_WRITE;
2158 prot |= p->flags;
2159
bellard9fa3e852004-01-04 18:06:42 +00002160 /* and since the content will be modified, we must invalidate
2161 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002162 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002163#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002164 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002165#endif
bellard9fa3e852004-01-04 18:06:42 +00002166 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002167 mprotect((void *)g2h(host_start), qemu_host_page_size,
2168 prot & PAGE_BITS);
2169
2170 mmap_unlock();
2171 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002172 }
pbrookc8a706f2008-06-02 16:16:42 +00002173 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002174 return 0;
2175}
bellard9fa3e852004-01-04 18:06:42 +00002176#endif /* defined(CONFIG_USER_ONLY) */
2177
pbrooke2eef172008-06-08 01:09:01 +00002178#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002179
Paul Brookc04b2b72010-03-01 03:31:14 +00002180#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2181typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002182 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002183 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002184 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002185} subpage_t;
2186
Anthony Liguoric227f092009-10-01 16:12:16 -05002187static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002188 uint16_t section);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002189static subpage_t *subpage_init(target_phys_addr_t base);
Avi Kivity5312bd82012-02-12 18:32:55 +02002190static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002191{
Avi Kivity5312bd82012-02-12 18:32:55 +02002192 MemoryRegionSection *section = &phys_sections[section_index];
2193 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002194
2195 if (mr->subpage) {
2196 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2197 memory_region_destroy(&subpage->iomem);
2198 g_free(subpage);
2199 }
2200}
2201
Avi Kivity4346ae32012-02-10 17:00:01 +02002202static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002203{
2204 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002205 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002206
Avi Kivityc19e8802012-02-13 20:25:31 +02002207 if (lp->ptr == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002208 return;
2209 }
2210
Avi Kivityc19e8802012-02-13 20:25:31 +02002211 p = phys_map_nodes[lp->ptr];
Avi Kivity4346ae32012-02-10 17:00:01 +02002212 for (i = 0; i < L2_SIZE; ++i) {
Avi Kivity07f07b32012-02-13 20:45:32 +02002213 if (!p[i].is_leaf) {
Avi Kivity54688b12012-02-09 17:34:32 +02002214 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002215 } else {
Avi Kivityc19e8802012-02-13 20:25:31 +02002216 destroy_page_desc(p[i].ptr);
Avi Kivity54688b12012-02-09 17:34:32 +02002217 }
Avi Kivity54688b12012-02-09 17:34:32 +02002218 }
Avi Kivity07f07b32012-02-13 20:45:32 +02002219 lp->is_leaf = 0;
Avi Kivityc19e8802012-02-13 20:25:31 +02002220 lp->ptr = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002221}
2222
2223static void destroy_all_mappings(void)
2224{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002225 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002226 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002227}
2228
Avi Kivity5312bd82012-02-12 18:32:55 +02002229static uint16_t phys_section_add(MemoryRegionSection *section)
2230{
2231 if (phys_sections_nb == phys_sections_nb_alloc) {
2232 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2233 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2234 phys_sections_nb_alloc);
2235 }
2236 phys_sections[phys_sections_nb] = *section;
2237 return phys_sections_nb++;
2238}
2239
2240static void phys_sections_clear(void)
2241{
2242 phys_sections_nb = 0;
2243}
2244
Avi Kivity0f0cb162012-02-13 17:14:32 +02002245static void register_subpage(MemoryRegionSection *section)
2246{
2247 subpage_t *subpage;
2248 target_phys_addr_t base = section->offset_within_address_space
2249 & TARGET_PAGE_MASK;
Avi Kivityf3705d52012-03-08 16:16:34 +02002250 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002251 MemoryRegionSection subsection = {
2252 .offset_within_address_space = base,
2253 .size = TARGET_PAGE_SIZE,
2254 };
Avi Kivity0f0cb162012-02-13 17:14:32 +02002255 target_phys_addr_t start, end;
2256
Avi Kivityf3705d52012-03-08 16:16:34 +02002257 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002258
Avi Kivityf3705d52012-03-08 16:16:34 +02002259 if (!(existing->mr->subpage)) {
Avi Kivity0f0cb162012-02-13 17:14:32 +02002260 subpage = subpage_init(base);
2261 subsection.mr = &subpage->iomem;
Avi Kivity29990972012-02-13 20:21:20 +02002262 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2263 phys_section_add(&subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02002264 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002265 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002266 }
2267 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Tyler Halladb2a9b2012-07-25 18:45:03 -04002268 end = start + section->size - 1;
Avi Kivity0f0cb162012-02-13 17:14:32 +02002269 subpage_register(subpage, start, end, phys_section_add(section));
2270}
2271
2272
2273static void register_multipage(MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00002274{
Avi Kivitydd811242012-01-02 12:17:03 +02002275 target_phys_addr_t start_addr = section->offset_within_address_space;
2276 ram_addr_t size = section->size;
Avi Kivity29990972012-02-13 20:21:20 +02002277 target_phys_addr_t addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002278 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002279
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002280 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002281
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002282 addr = start_addr;
Avi Kivity29990972012-02-13 20:21:20 +02002283 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2284 section_index);
bellard33417e72003-08-10 21:47:01 +00002285}
2286
Avi Kivity0f0cb162012-02-13 17:14:32 +02002287void cpu_register_physical_memory_log(MemoryRegionSection *section,
2288 bool readonly)
2289{
2290 MemoryRegionSection now = *section, remain = *section;
2291
2292 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2293 || (now.size < TARGET_PAGE_SIZE)) {
2294 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2295 - now.offset_within_address_space,
2296 now.size);
2297 register_subpage(&now);
2298 remain.size -= now.size;
2299 remain.offset_within_address_space += now.size;
2300 remain.offset_within_region += now.size;
2301 }
Tyler Hall69b67642012-07-25 18:45:04 -04002302 while (remain.size >= TARGET_PAGE_SIZE) {
2303 now = remain;
2304 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
2305 now.size = TARGET_PAGE_SIZE;
2306 register_subpage(&now);
2307 } else {
2308 now.size &= TARGET_PAGE_MASK;
2309 register_multipage(&now);
2310 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02002311 remain.size -= now.size;
2312 remain.offset_within_address_space += now.size;
2313 remain.offset_within_region += now.size;
2314 }
2315 now = remain;
2316 if (now.size) {
2317 register_subpage(&now);
2318 }
2319}
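/* Worked example of the split above (hypothetical numbers, 4K target
   pages, region offsets assumed page-aligned after the head): a section
   covering 0x1800..0x5400 becomes
     0x1800-0x2000  head           -> register_subpage()
     0x2000-0x5000  aligned middle -> register_multipage()
     0x5000-0x5400  tail           -> register_subpage()
   so only the unaligned edges pay the subpage indirection. */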
2320
2321
Anthony Liguoric227f092009-10-01 16:12:16 -05002322void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002323{
2324 if (kvm_enabled())
2325 kvm_coalesce_mmio_region(addr, size);
2326}
2327
Anthony Liguoric227f092009-10-01 16:12:16 -05002328void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002329{
2330 if (kvm_enabled())
2331 kvm_uncoalesce_mmio_region(addr, size);
2332}
2333
Sheng Yang62a27442010-01-26 19:21:16 +08002334void qemu_flush_coalesced_mmio_buffer(void)
2335{
2336 if (kvm_enabled())
2337 kvm_flush_coalesced_mmio_buffer();
2338}
2339
Marcelo Tosattic9027602010-03-01 20:25:08 -03002340#if defined(__linux__) && !defined(TARGET_S390X)
2341
2342#include <sys/vfs.h>
2343
2344#define HUGETLBFS_MAGIC 0x958458f6
2345
2346static long gethugepagesize(const char *path)
2347{
2348 struct statfs fs;
2349 int ret;
2350
2351 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002352 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002353 } while (ret != 0 && errno == EINTR);
2354
2355 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002356 perror(path);
2357 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002358 }
2359
2360 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002361 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002362
2363 return fs.f_bsize;
2364}
2365
Alex Williamson04b16652010-07-02 11:13:17 -06002366static void *file_ram_alloc(RAMBlock *block,
2367 ram_addr_t memory,
2368 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002369{
2370 char *filename;
2371 void *area;
2372 int fd;
2373#ifdef MAP_POPULATE
2374 int flags;
2375#endif
2376 unsigned long hpagesize;
2377
2378 hpagesize = gethugepagesize(path);
2379 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002380 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002381 }
2382
2383 if (memory < hpagesize) {
2384 return NULL;
2385 }
2386
2387 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2388 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2389 return NULL;
2390 }
2391
2392 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002393 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002394 }
2395
2396 fd = mkstemp(filename);
2397 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002398 perror("unable to create backing store for hugepages");
2399 free(filename);
2400 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002401 }
2402 unlink(filename);
2403 free(filename);
2404
2405 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2406
2407 /*
2408 * ftruncate is not supported by hugetlbfs in older
2409 * hosts, so don't bother bailing out on errors.
2410 * If anything goes wrong with it under other filesystems,
2411 * mmap will fail.
2412 */
2413 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002414 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002415
2416#ifdef MAP_POPULATE
2417 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages when
2418 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2419 * to sidestep this quirk.
2420 */
2421 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2422 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2423#else
2424 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2425#endif
2426 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002427 perror("file_ram_alloc: can't mmap RAM pages");
2428 close(fd);
2429 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002430 }
Alex Williamson04b16652010-07-02 11:13:17 -06002431 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002432 return area;
2433}
2434#endif
2435
Alex Williamsond17b5282010-06-25 11:08:38 -06002436static ram_addr_t find_ram_offset(ram_addr_t size)
2437{
Alex Williamson04b16652010-07-02 11:13:17 -06002438 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002439 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002440
2441 if (QLIST_EMPTY(&ram_list.blocks))
2442 return 0;
2443
2444 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002445 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002446
2447 end = block->offset + block->length;
2448
2449 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2450 if (next_block->offset >= end) {
2451 next = MIN(next, next_block->offset);
2452 }
2453 }
2454 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002455 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002456 mingap = next - end;
2457 }
2458 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002459
2460 if (offset == RAM_ADDR_MAX) {
2461 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2462 (uint64_t)size);
2463 abort();
2464 }
2465
Alex Williamson04b16652010-07-02 11:13:17 -06002466 return offset;
2467}
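/* Worked example of the best-fit search above (hypothetical layout): with
   blocks [0, 0x1000) and [0x3000, 0x4000), a request of size 0x1000
   examines each block end; the 0x2000-byte gap starting at 0x1000 is the
   smallest one that fits, so 0x1000 is returned. */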
2468
2469static ram_addr_t last_ram_offset(void)
2470{
Alex Williamsond17b5282010-06-25 11:08:38 -06002471 RAMBlock *block;
2472 ram_addr_t last = 0;
2473
2474 QLIST_FOREACH(block, &ram_list.blocks, next)
2475 last = MAX(last, block->offset + block->length);
2476
2477 return last;
2478}
2479
Jason Baronddb97f12012-08-02 15:44:16 -04002480static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
2481{
2482 int ret;
2483 QemuOpts *machine_opts;
2484
2485 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
2486 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2487 if (machine_opts &&
2488 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
2489 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
2490 if (ret) {
2491 perror("qemu_madvise");
2492 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
2493 "but dump_guest_core=off specified\n");
2494 }
2495 }
2496}
2497
Avi Kivityc5705a72011-12-20 15:59:12 +02002498void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002499{
2500 RAMBlock *new_block, *block;
2501
Avi Kivityc5705a72011-12-20 15:59:12 +02002502 new_block = NULL;
2503 QLIST_FOREACH(block, &ram_list.blocks, next) {
2504 if (block->offset == addr) {
2505 new_block = block;
2506 break;
2507 }
2508 }
2509 assert(new_block);
2510 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002511
Anthony Liguori09e5ab62012-02-03 12:28:43 -06002512 if (dev) {
2513 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002514 if (id) {
2515 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002516 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002517 }
2518 }
2519 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2520
2521 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002522 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002523 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2524 new_block->idstr);
2525 abort();
2526 }
2527 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002528}
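/* A minimal usage sketch, assuming a block that was just allocated with
 * qemu_ram_alloc() below; with a device the identifier becomes
 * "<device-path>/<name>", without one it is just "<name>":
 *
 *     ram_addr_t off = qemu_ram_alloc(size, mr);
 *     qemu_ram_set_idstr(off, "pc.ram", NULL);
 */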
2529
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002530static int memory_try_enable_merging(void *addr, size_t len)
2531{
2532 QemuOpts *opts;
2533
2534 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2535 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
2536 /* disabled by the user */
2537 return 0;
2538 }
2539
2540 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
2541}
2542
Avi Kivityc5705a72011-12-20 15:59:12 +02002543ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2544 MemoryRegion *mr)
2545{
2546 RAMBlock *new_block;
2547
2548 size = TARGET_PAGE_ALIGN(size);
2549 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002550
Avi Kivity7c637362011-12-21 13:09:49 +02002551 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002552 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002553 if (host) {
2554 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002555 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002556 } else {
2557 if (mem_path) {
2558#if defined (__linux__) && !defined(TARGET_S390X)
2559 new_block->host = file_ram_alloc(new_block, size, mem_path);
2560 if (!new_block->host) {
2561 new_block->host = qemu_vmalloc(size);
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002562 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002563 }
2564#else
2565 fprintf(stderr, "-mem-path option unsupported\n");
2566 exit(1);
2567#endif
2568 } else {
Jan Kiszka868bb332011-06-21 22:59:09 +02002569 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002570 xen_ram_alloc(new_block->offset, size, mr);
Christian Borntraegerfdec9912012-06-15 05:10:30 +00002571 } else if (kvm_enabled()) {
2572 /* some s390/kvm configurations have special constraints */
2573 new_block->host = kvm_vmalloc(size);
Jun Nakajima432d2682010-08-31 16:41:25 +01002574 } else {
2575 new_block->host = qemu_vmalloc(size);
2576 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002577 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002578 }
2579 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002580 new_block->length = size;
2581
2582 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2583
Anthony Liguori7267c092011-08-20 22:09:37 -05002584 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002585 last_ram_offset() >> TARGET_PAGE_BITS);
Igor Mitsyanko5fda0432012-08-10 18:45:11 +04002586 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2587 0, size >> TARGET_PAGE_BITS);
Juan Quintela1720aee2012-06-22 13:14:17 +02002588 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002589
Jason Baronddb97f12012-08-02 15:44:16 -04002590 qemu_ram_setup_dump(new_block->host, size);
2591
Cam Macdonell84b89d72010-07-26 18:10:57 -06002592 if (kvm_enabled())
2593 kvm_setup_guest_memory(new_block->host, size);
2594
2595 return new_block->offset;
2596}
2597
Avi Kivityc5705a72011-12-20 15:59:12 +02002598ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002599{
Avi Kivityc5705a72011-12-20 15:59:12 +02002600 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002601}
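
/* A minimal allocation sketch (prototypes for the helpers used here come
 * from cpu-common.h).  The size and the zero-fill are arbitrary;
 * illustrative only, nothing calls it. */
static void example_ram_alloc_use(MemoryRegion *mr)
{
    ram_addr_t off = qemu_ram_alloc(0x10000, mr);   /* 64 KiB of guest RAM */
    uint8_t *host = qemu_get_ram_ptr(off);          /* host view of the block */

    memset(host, 0, 0x10000);                       /* device-local access */
    qemu_put_ram_ptr(host);                         /* release the mapping hint */
    qemu_ram_free(off);                             /* drop the block again */
}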
bellarde9a1ab12007-02-08 23:08:38 +00002602
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002603void qemu_ram_free_from_ptr(ram_addr_t addr)
2604{
2605 RAMBlock *block;
2606
2607 QLIST_FOREACH(block, &ram_list.blocks, next) {
2608 if (addr == block->offset) {
2609 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002610 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002611 return;
2612 }
2613 }
2614}
2615
Anthony Liguoric227f092009-10-01 16:12:16 -05002616void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002617{
Alex Williamson04b16652010-07-02 11:13:17 -06002618 RAMBlock *block;
2619
2620 QLIST_FOREACH(block, &ram_list.blocks, next) {
2621 if (addr == block->offset) {
2622 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002623 if (block->flags & RAM_PREALLOC_MASK) {
2624 ;
2625 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002626#if defined (__linux__) && !defined(TARGET_S390X)
2627 if (block->fd) {
2628 munmap(block->host, block->length);
2629 close(block->fd);
2630 } else {
2631 qemu_vfree(block->host);
2632 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002633#else
2634 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002635#endif
2636 } else {
2637#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2638 munmap(block->host, block->length);
2639#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002640 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002641 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002642 } else {
2643 qemu_vfree(block->host);
2644 }
Alex Williamson04b16652010-07-02 11:13:17 -06002645#endif
2646 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002647 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002648 return;
2649 }
2650 }
2651
bellarde9a1ab12007-02-08 23:08:38 +00002652}
2653
Huang Yingcd19cfa2011-03-02 08:56:19 +01002654#ifndef _WIN32
2655void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2656{
2657 RAMBlock *block;
2658 ram_addr_t offset;
2659 int flags;
2660 void *area, *vaddr;
2661
2662 QLIST_FOREACH(block, &ram_list.blocks, next) {
2663 offset = addr - block->offset;
2664 if (offset < block->length) {
2665 vaddr = block->host + offset;
2666 if (block->flags & RAM_PREALLOC_MASK) {
2667 ;
2668 } else {
2669 flags = MAP_FIXED;
2670 munmap(vaddr, length);
2671 if (mem_path) {
2672#if defined(__linux__) && !defined(TARGET_S390X)
2673 if (block->fd) {
2674#ifdef MAP_POPULATE
2675 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2676 MAP_PRIVATE;
2677#else
2678 flags |= MAP_PRIVATE;
2679#endif
2680 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2681 flags, block->fd, offset);
2682 } else {
2683 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2684 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2685 flags, -1, 0);
2686 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002687#else
2688 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01002689#endif
2690 } else {
2691#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2692 flags |= MAP_SHARED | MAP_ANONYMOUS;
2693 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2694 flags, -1, 0);
2695#else
2696 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2697 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2698 flags, -1, 0);
2699#endif
2700 }
2701 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002702 fprintf(stderr, "Could not remap addr: "
2703 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01002704 length, addr);
2705 exit(1);
2706 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03002707 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04002708 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002709 }
2710 return;
2711 }
2712 }
2713}
2714#endif /* !_WIN32 */
2715
pbrookdc828ca2009-04-09 22:21:07 +00002716/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002717 With the exception of the softmmu code in this file, this should
2718 only be used for local memory (e.g. video ram) that the device owns,
2719 and knows it isn't going to access beyond the end of the block.
2720
2721 It should not be used for general purpose DMA.
2722 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2723 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002724void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002725{
pbrook94a6b542009-04-11 17:15:54 +00002726 RAMBlock *block;
2727
Alex Williamsonf471a172010-06-11 11:11:42 -06002728 QLIST_FOREACH(block, &ram_list.blocks, next) {
2729 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05002730            /* Move this entry to the start of the list.  */
2731 if (block != QLIST_FIRST(&ram_list.blocks)) {
2732 QLIST_REMOVE(block, next);
2733 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2734 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002735 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002736            /* We need to check whether the requested address is in RAM
2737             * because we don't want to map the entire guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002738             * In that case, just map up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002739 */
2740 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002741 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002742 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002743 block->host =
2744 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002745 }
2746 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002747 return block->host + (addr - block->offset);
2748 }
pbrook94a6b542009-04-11 17:15:54 +00002749 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002750
2751 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2752 abort();
2753
2754 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00002755}
2756
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002757/* Return a host pointer to ram allocated with qemu_ram_alloc.
2758 * Same as qemu_get_ram_ptr, but avoids reordering the RAM blocks.
2759 */
2760void *qemu_safe_ram_ptr(ram_addr_t addr)
2761{
2762 RAMBlock *block;
2763
2764 QLIST_FOREACH(block, &ram_list.blocks, next) {
2765 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02002766 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002767            /* We need to check whether the requested address is in RAM
2768             * because we don't want to map the entire guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002769             * In that case, just map up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002770 */
2771 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002772 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002773 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002774 block->host =
2775 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002776 }
2777 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002778 return block->host + (addr - block->offset);
2779 }
2780 }
2781
2782 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2783 abort();
2784
2785 return NULL;
2786}
2787
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002788/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
2789 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002790void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002791{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002792 if (*size == 0) {
2793 return NULL;
2794 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002795 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002796 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02002797 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002798 RAMBlock *block;
2799
2800 QLIST_FOREACH(block, &ram_list.blocks, next) {
2801 if (addr - block->offset < block->length) {
2802 if (addr - block->offset + *size > block->length)
2803 *size = block->length - addr + block->offset;
2804 return block->host + (addr - block->offset);
2805 }
2806 }
2807
2808 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2809 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002810 }
2811}
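
/* A minimal sketch of the length-clamping contract above: on return
 * *size may have shrunk to what is actually contiguous in the host
 * mapping, so the caller must honour the updated value.  Illustrative
 * only; nothing calls it. */
static void *example_map_some(ram_addr_t addr, ram_addr_t want)
{
    ram_addr_t size = want;
    void *p = qemu_ram_ptr_length(addr, &size);

    /* only 'size' bytes of 'p' are valid, and size may be < want */
    return p;
}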
2812
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002813void qemu_put_ram_ptr(void *addr)
2814{
2815 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002816}
2817
Marcelo Tosattie8902612010-10-11 15:31:19 -03002818int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00002819{
pbrook94a6b542009-04-11 17:15:54 +00002820 RAMBlock *block;
2821 uint8_t *host = ptr;
2822
Jan Kiszka868bb332011-06-21 22:59:09 +02002823 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002824 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002825 return 0;
2826 }
2827
Alex Williamsonf471a172010-06-11 11:11:42 -06002828 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002829        /* This case occurs when the block is not mapped. */
2830 if (block->host == NULL) {
2831 continue;
2832 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002833 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002834 *ram_addr = block->offset + (host - block->host);
2835 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06002836 }
pbrook94a6b542009-04-11 17:15:54 +00002837 }
Jun Nakajima432d2682010-08-31 16:41:25 +01002838
Marcelo Tosattie8902612010-10-11 15:31:19 -03002839 return -1;
2840}
Alex Williamsonf471a172010-06-11 11:11:42 -06002841
Marcelo Tosattie8902612010-10-11 15:31:19 -03002842/* Some of the softmmu routines need to translate from a host pointer
2843 (typically a TLB entry) back to a ram offset. */
2844ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2845{
2846 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06002847
Marcelo Tosattie8902612010-10-11 15:31:19 -03002848 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2849 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2850 abort();
2851 }
2852 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002853}
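
/* A minimal round-trip sketch: a host pointer produced by
 * qemu_get_ram_ptr() translates back to the same ram_addr_t.
 * Illustrative only; nothing calls it. */
static void example_host_to_ram_addr(ram_addr_t off)
{
    void *host = qemu_get_ram_ptr(off);
    ram_addr_t back;

    if (qemu_ram_addr_from_host(host, &back) == 0) {
        assert(back == off);
    }
}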
2854
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002855static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
2856 unsigned size)
bellard33417e72003-08-10 21:47:01 +00002857{
pbrook67d3b952006-12-18 05:03:52 +00002858#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002859 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00002860#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002861#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002862 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002863#endif
2864 return 0;
2865}
2866
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002867static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
2868 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00002869{
2870#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002871 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00002872#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002873#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002874 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002875#endif
2876}
2877
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002878static const MemoryRegionOps unassigned_mem_ops = {
2879 .read = unassigned_mem_read,
2880 .write = unassigned_mem_write,
2881 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002882};
2883
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002884static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
2885 unsigned size)
2886{
2887 abort();
2888}
2889
2890static void error_mem_write(void *opaque, target_phys_addr_t addr,
2891 uint64_t value, unsigned size)
2892{
2893 abort();
2894}
2895
2896static const MemoryRegionOps error_mem_ops = {
2897 .read = error_mem_read,
2898 .write = error_mem_write,
2899 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002900};
2901
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002902static const MemoryRegionOps rom_mem_ops = {
2903 .read = error_mem_read,
2904 .write = unassigned_mem_write,
2905 .endianness = DEVICE_NATIVE_ENDIAN,
2906};
2907
2908static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
2909 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002910{
bellard3a7d9292005-08-21 09:26:42 +00002911 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002912 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002913 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2914#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002915 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002916 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002917#endif
2918 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002919 switch (size) {
2920 case 1:
2921 stb_p(qemu_get_ram_ptr(ram_addr), val);
2922 break;
2923 case 2:
2924 stw_p(qemu_get_ram_ptr(ram_addr), val);
2925 break;
2926 case 4:
2927 stl_p(qemu_get_ram_ptr(ram_addr), val);
2928 break;
2929 default:
2930 abort();
2931 }
bellardf23db162005-08-21 19:12:28 +00002932 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002933 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002934 /* we remove the notdirty callback only if the code has been
2935 flushed */
2936 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002937 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002938}
2939
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002940static const MemoryRegionOps notdirty_mem_ops = {
2941 .read = error_mem_read,
2942 .write = notdirty_mem_write,
2943 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002944};
2945
pbrook0f459d12008-06-09 00:20:13 +00002946/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00002947static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002948{
Andreas Färber9349b4f2012-03-14 01:38:32 +01002949 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00002950 target_ulong pc, cs_base;
2951 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00002952 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002953 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002954 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002955
aliguori06d55cc2008-11-18 20:24:06 +00002956 if (env->watchpoint_hit) {
2957 /* We re-entered the check after replacing the TB. Now raise
2958         * the debug interrupt so that it will trigger after the
2959 * current instruction. */
2960 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2961 return;
2962 }
pbrook2e70f6e2008-06-29 01:03:05 +00002963 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002964 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00002965 if ((vaddr == (wp->vaddr & len_mask) ||
2966 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00002967 wp->flags |= BP_WATCHPOINT_HIT;
2968 if (!env->watchpoint_hit) {
2969 env->watchpoint_hit = wp;
2970 tb = tb_find_pc(env->mem_io_pc);
2971 if (!tb) {
2972 cpu_abort(env, "check_watchpoint: could not find TB for "
2973 "pc=%p", (void *)env->mem_io_pc);
2974 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00002975 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00002976 tb_phys_invalidate(tb, -1);
2977 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2978 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04002979 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00002980 } else {
2981 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2982 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04002983 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002984 }
aliguori06d55cc2008-11-18 20:24:06 +00002985 }
aliguori6e140f22008-11-18 20:37:55 +00002986 } else {
2987 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002988 }
2989 }
2990}
2991
pbrook6658ffb2007-03-16 23:58:11 +00002992/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2993 so these check for a hit then pass through to the normal out-of-line
2994 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02002995static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
2996 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00002997{
Avi Kivity1ec9b902012-01-02 12:47:48 +02002998 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
2999 switch (size) {
3000 case 1: return ldub_phys(addr);
3001 case 2: return lduw_phys(addr);
3002 case 4: return ldl_phys(addr);
3003 default: abort();
3004 }
pbrook6658ffb2007-03-16 23:58:11 +00003005}
3006
Avi Kivity1ec9b902012-01-02 12:47:48 +02003007static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3008 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00003009{
Avi Kivity1ec9b902012-01-02 12:47:48 +02003010 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3011 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04003012 case 1:
3013 stb_phys(addr, val);
3014 break;
3015 case 2:
3016 stw_phys(addr, val);
3017 break;
3018 case 4:
3019 stl_phys(addr, val);
3020 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02003021 default: abort();
3022 }
pbrook6658ffb2007-03-16 23:58:11 +00003023}
3024
Avi Kivity1ec9b902012-01-02 12:47:48 +02003025static const MemoryRegionOps watch_mem_ops = {
3026 .read = watch_mem_read,
3027 .write = watch_mem_write,
3028 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00003029};
pbrook6658ffb2007-03-16 23:58:11 +00003030
Avi Kivity70c68e42012-01-02 12:32:48 +02003031static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3032 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003033{
Avi Kivity70c68e42012-01-02 12:32:48 +02003034 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003035 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003036 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003037#if defined(DEBUG_SUBPAGE)
3038 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3039 mmio, len, addr, idx);
3040#endif
blueswir1db7b5422007-05-26 17:36:03 +00003041
Avi Kivity5312bd82012-02-12 18:32:55 +02003042 section = &phys_sections[mmio->sub_section[idx]];
3043 addr += mmio->base;
3044 addr -= section->offset_within_address_space;
3045 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003046 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003047}
3048
Avi Kivity70c68e42012-01-02 12:32:48 +02003049static void subpage_write(void *opaque, target_phys_addr_t addr,
3050 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003051{
Avi Kivity70c68e42012-01-02 12:32:48 +02003052 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003053 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003054 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003055#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003056 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3057 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003058 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003059#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003060
Avi Kivity5312bd82012-02-12 18:32:55 +02003061 section = &phys_sections[mmio->sub_section[idx]];
3062 addr += mmio->base;
3063 addr -= section->offset_within_address_space;
3064 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003065 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003066}
3067
Avi Kivity70c68e42012-01-02 12:32:48 +02003068static const MemoryRegionOps subpage_ops = {
3069 .read = subpage_read,
3070 .write = subpage_write,
3071 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003072};
3073
Avi Kivityde712f92012-01-02 12:41:07 +02003074static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3075 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003076{
3077 ram_addr_t raddr = addr;
3078 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003079 switch (size) {
3080 case 1: return ldub_p(ptr);
3081 case 2: return lduw_p(ptr);
3082 case 4: return ldl_p(ptr);
3083 default: abort();
3084 }
Andreas Färber56384e82011-11-30 16:26:21 +01003085}
3086
Avi Kivityde712f92012-01-02 12:41:07 +02003087static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3088 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003089{
3090 ram_addr_t raddr = addr;
3091 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003092 switch (size) {
3093 case 1: return stb_p(ptr, value);
3094 case 2: return stw_p(ptr, value);
3095 case 4: return stl_p(ptr, value);
3096 default: abort();
3097 }
Andreas Färber56384e82011-11-30 16:26:21 +01003098}
3099
Avi Kivityde712f92012-01-02 12:41:07 +02003100static const MemoryRegionOps subpage_ram_ops = {
3101 .read = subpage_ram_read,
3102 .write = subpage_ram_write,
3103 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003104};
3105
Anthony Liguoric227f092009-10-01 16:12:16 -05003106static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003107 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003108{
3109 int idx, eidx;
3110
3111 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3112 return -1;
3113 idx = SUBPAGE_IDX(start);
3114 eidx = SUBPAGE_IDX(end);
3115#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003116    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
blueswir1db7b5422007-05-26 17:36:03 +00003117           __func__, mmio, start, end, idx, eidx, section);
3118#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003119 if (memory_region_is_ram(phys_sections[section].mr)) {
3120 MemoryRegionSection new_section = phys_sections[section];
3121 new_section.mr = &io_mem_subpage_ram;
3122 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003123 }
blueswir1db7b5422007-05-26 17:36:03 +00003124 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003125 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003126 }
3127
3128 return 0;
3129}
3130
Avi Kivity0f0cb162012-02-13 17:14:32 +02003131static subpage_t *subpage_init(target_phys_addr_t base)
blueswir1db7b5422007-05-26 17:36:03 +00003132{
Anthony Liguoric227f092009-10-01 16:12:16 -05003133 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003134
Anthony Liguori7267c092011-08-20 22:09:37 -05003135 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003136
3137 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003138 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3139 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003140 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00003141#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003142    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3143           mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003144#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02003145 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00003146
3147 return mmio;
3148}
3149
Avi Kivity5312bd82012-02-12 18:32:55 +02003150static uint16_t dummy_section(MemoryRegion *mr)
3151{
3152 MemoryRegionSection section = {
3153 .mr = mr,
3154 .offset_within_address_space = 0,
3155 .offset_within_region = 0,
3156 .size = UINT64_MAX,
3157 };
3158
3159 return phys_section_add(&section);
3160}
3161
Avi Kivity37ec01d2012-03-08 18:08:35 +02003162MemoryRegion *iotlb_to_region(target_phys_addr_t index)
Avi Kivityaa102232012-03-08 17:06:55 +02003163{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003164 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02003165}
3166
Avi Kivitye9179ce2009-06-14 11:38:52 +03003167static void io_mem_init(void)
3168{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003169 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003170 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3171 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3172 "unassigned", UINT64_MAX);
3173 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3174 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003175 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3176 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02003177 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3178 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003179}
3180
Avi Kivity50c1e142012-02-08 21:36:02 +02003181static void core_begin(MemoryListener *listener)
3182{
Avi Kivity54688b12012-02-09 17:34:32 +02003183 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003184 phys_sections_clear();
Avi Kivityc19e8802012-02-13 20:25:31 +02003185 phys_map.ptr = PHYS_MAP_NODE_NIL;
Avi Kivity5312bd82012-02-12 18:32:55 +02003186 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02003187 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3188 phys_section_rom = dummy_section(&io_mem_rom);
3189 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02003190}
3191
3192static void core_commit(MemoryListener *listener)
3193{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003194 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02003195
3196 /* since each CPU stores ram addresses in its TLB cache, we must
3197 reset the modified entries */
3198 /* XXX: slow ! */
3199 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3200 tlb_flush(env, 1);
3201 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003202}
3203
Avi Kivity93632742012-02-08 16:54:16 +02003204static void core_region_add(MemoryListener *listener,
3205 MemoryRegionSection *section)
3206{
Avi Kivity4855d412012-02-08 21:16:05 +02003207 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003208}
3209
3210static void core_region_del(MemoryListener *listener,
3211 MemoryRegionSection *section)
3212{
Avi Kivity93632742012-02-08 16:54:16 +02003213}
3214
Avi Kivity50c1e142012-02-08 21:36:02 +02003215static void core_region_nop(MemoryListener *listener,
3216 MemoryRegionSection *section)
3217{
Avi Kivity54688b12012-02-09 17:34:32 +02003218 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003219}
3220
Avi Kivity93632742012-02-08 16:54:16 +02003221static void core_log_start(MemoryListener *listener,
3222 MemoryRegionSection *section)
3223{
3224}
3225
3226static void core_log_stop(MemoryListener *listener,
3227 MemoryRegionSection *section)
3228{
3229}
3230
3231static void core_log_sync(MemoryListener *listener,
3232 MemoryRegionSection *section)
3233{
3234}
3235
3236static void core_log_global_start(MemoryListener *listener)
3237{
3238 cpu_physical_memory_set_dirty_tracking(1);
3239}
3240
3241static void core_log_global_stop(MemoryListener *listener)
3242{
3243 cpu_physical_memory_set_dirty_tracking(0);
3244}
3245
3246static void core_eventfd_add(MemoryListener *listener,
3247 MemoryRegionSection *section,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02003248 bool match_data, uint64_t data, EventNotifier *e)
Avi Kivity93632742012-02-08 16:54:16 +02003249{
3250}
3251
3252static void core_eventfd_del(MemoryListener *listener,
3253 MemoryRegionSection *section,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02003254 bool match_data, uint64_t data, EventNotifier *e)
Avi Kivity93632742012-02-08 16:54:16 +02003255{
3256}
3257
Avi Kivity50c1e142012-02-08 21:36:02 +02003258static void io_begin(MemoryListener *listener)
3259{
3260}
3261
3262static void io_commit(MemoryListener *listener)
3263{
3264}
3265
Avi Kivity4855d412012-02-08 21:16:05 +02003266static void io_region_add(MemoryListener *listener,
3267 MemoryRegionSection *section)
3268{
Avi Kivitya2d33522012-03-05 17:40:12 +02003269 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3270
3271 mrio->mr = section->mr;
3272 mrio->offset = section->offset_within_region;
3273 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02003274 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02003275 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02003276}
3277
3278static void io_region_del(MemoryListener *listener,
3279 MemoryRegionSection *section)
3280{
3281 isa_unassign_ioport(section->offset_within_address_space, section->size);
3282}
3283
Avi Kivity50c1e142012-02-08 21:36:02 +02003284static void io_region_nop(MemoryListener *listener,
3285 MemoryRegionSection *section)
3286{
3287}
3288
Avi Kivity4855d412012-02-08 21:16:05 +02003289static void io_log_start(MemoryListener *listener,
3290 MemoryRegionSection *section)
3291{
3292}
3293
3294static void io_log_stop(MemoryListener *listener,
3295 MemoryRegionSection *section)
3296{
3297}
3298
3299static void io_log_sync(MemoryListener *listener,
3300 MemoryRegionSection *section)
3301{
3302}
3303
3304static void io_log_global_start(MemoryListener *listener)
3305{
3306}
3307
3308static void io_log_global_stop(MemoryListener *listener)
3309{
3310}
3311
3312static void io_eventfd_add(MemoryListener *listener,
3313 MemoryRegionSection *section,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02003314 bool match_data, uint64_t data, EventNotifier *e)
Avi Kivity4855d412012-02-08 21:16:05 +02003315{
3316}
3317
3318static void io_eventfd_del(MemoryListener *listener,
3319 MemoryRegionSection *section,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02003320 bool match_data, uint64_t data, EventNotifier *e)
Avi Kivity4855d412012-02-08 21:16:05 +02003321{
3322}
3323
Avi Kivity93632742012-02-08 16:54:16 +02003324static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003325 .begin = core_begin,
3326 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003327 .region_add = core_region_add,
3328 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003329 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003330 .log_start = core_log_start,
3331 .log_stop = core_log_stop,
3332 .log_sync = core_log_sync,
3333 .log_global_start = core_log_global_start,
3334 .log_global_stop = core_log_global_stop,
3335 .eventfd_add = core_eventfd_add,
3336 .eventfd_del = core_eventfd_del,
3337 .priority = 0,
3338};
3339
Avi Kivity4855d412012-02-08 21:16:05 +02003340static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003341 .begin = io_begin,
3342 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003343 .region_add = io_region_add,
3344 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003345 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003346 .log_start = io_log_start,
3347 .log_stop = io_log_stop,
3348 .log_sync = io_log_sync,
3349 .log_global_start = io_log_global_start,
3350 .log_global_stop = io_log_global_stop,
3351 .eventfd_add = io_eventfd_add,
3352 .eventfd_del = io_eventfd_del,
3353 .priority = 0,
3354};
3355
Avi Kivity62152b82011-07-26 14:26:14 +03003356static void memory_map_init(void)
3357{
Anthony Liguori7267c092011-08-20 22:09:37 -05003358 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003359 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003360 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003361
Anthony Liguori7267c092011-08-20 22:09:37 -05003362 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003363 memory_region_init(system_io, "io", 65536);
3364 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003365
Avi Kivity4855d412012-02-08 21:16:05 +02003366 memory_listener_register(&core_memory_listener, system_memory);
3367 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003368}
3369
3370MemoryRegion *get_system_memory(void)
3371{
3372 return system_memory;
3373}
3374
Avi Kivity309cb472011-08-08 16:09:03 +03003375MemoryRegion *get_system_io(void)
3376{
3377 return system_io;
3378}
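
/* A minimal board-side sketch, assuming the memory_region_init_ram() and
 * memory_region_add_subregion() API from memory.h; the name and size are
 * made up.  Illustrative only; nothing calls it. */
static void example_populate_memory_map(void)
{
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, "example.ram", 0x800000); /* 8 MiB */
    memory_region_add_subregion(sysmem, 0, ram);          /* map at GPA 0 */
}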
3379
pbrooke2eef172008-06-08 01:09:01 +00003380#endif /* !defined(CONFIG_USER_ONLY) */
3381
bellard13eb76e2004-01-24 15:23:36 +00003382/* physical memory access (slow version, mainly for debug) */
3383#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01003384int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00003385 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003386{
3387 int l, flags;
3388 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003389 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003390
3391 while (len > 0) {
3392 page = addr & TARGET_PAGE_MASK;
3393 l = (page + TARGET_PAGE_SIZE) - addr;
3394 if (l > len)
3395 l = len;
3396 flags = page_get_flags(page);
3397 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003398 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003399 if (is_write) {
3400 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003401 return -1;
bellard579a97f2007-11-11 14:26:47 +00003402 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003403 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003404 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003405 memcpy(p, buf, l);
3406 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003407 } else {
3408 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003409 return -1;
bellard579a97f2007-11-11 14:26:47 +00003410 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003411 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003412 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003413 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003414 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003415 }
3416 len -= l;
3417 buf += l;
3418 addr += l;
3419 }
Paul Brooka68fe892010-03-01 00:08:59 +00003420 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003421}
bellard8df1cd02005-01-28 22:37:22 +00003422
bellard13eb76e2004-01-24 15:23:36 +00003423#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003424
3425static void invalidate_and_set_dirty(target_phys_addr_t addr,
3426 target_phys_addr_t length)
3427{
3428 if (!cpu_physical_memory_is_dirty(addr)) {
3429 /* invalidate code */
3430 tb_invalidate_phys_page_range(addr, addr + length, 0);
3431 /* set dirty bit */
3432 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
3433 }
Anthony PERARDe2269392012-10-03 13:49:22 +00003434 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003435}
3436
Anthony Liguoric227f092009-10-01 16:12:16 -05003437void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003438 int len, int is_write)
3439{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003440 int l;
bellard13eb76e2004-01-24 15:23:36 +00003441 uint8_t *ptr;
3442 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003443 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003444 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003445
bellard13eb76e2004-01-24 15:23:36 +00003446 while (len > 0) {
3447 page = addr & TARGET_PAGE_MASK;
3448 l = (page + TARGET_PAGE_SIZE) - addr;
3449 if (l > len)
3450 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003451 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003452
bellard13eb76e2004-01-24 15:23:36 +00003453 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003454 if (!memory_region_is_ram(section->mr)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003455 target_phys_addr_t addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00003456 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00003457 /* XXX: could force cpu_single_env to NULL to avoid
3458 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003459 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003460 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003461 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003462 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003463 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003464 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003465 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003466 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003467 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003468 l = 2;
3469 } else {
bellard1c213d12005-09-03 10:49:04 +00003470 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003471 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003472 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003473 l = 1;
3474 }
Avi Kivityf3705d52012-03-08 16:16:34 +02003475 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003476 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003477 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003478 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00003479 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003480 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003481 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003482 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003483 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003484 }
3485 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003486 if (!(memory_region_is_ram(section->mr) ||
3487 memory_region_is_romd(section->mr))) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003488 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003489 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003490 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00003491 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003492 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003493 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003494 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003495 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003496 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003497 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003498 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003499 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003500 l = 2;
3501 } else {
bellard1c213d12005-09-03 10:49:04 +00003502 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003503 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003504 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003505 l = 1;
3506 }
3507 } else {
3508 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00003509 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00003510 + memory_region_section_addr(section,
3511 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02003512 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003513 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003514 }
3515 }
3516 len -= l;
3517 buf += l;
3518 addr += l;
3519 }
3520}
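
/* A minimal sketch of the usual wrappers around cpu_physical_memory_rw()
 * (cpu_physical_memory_read/write from cpu-common.h): copy a word out of
 * guest-physical memory and write it back one page later.  The addresses
 * are arbitrary; illustrative only, nothing calls it. */
static void example_phys_rw(target_phys_addr_t addr)
{
    uint32_t word;

    cpu_physical_memory_read(addr, &word, sizeof(word));
    cpu_physical_memory_write(addr + TARGET_PAGE_SIZE, &word, sizeof(word));
}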
bellard8df1cd02005-01-28 22:37:22 +00003521
bellardd0ecd2a2006-04-23 17:14:48 +00003522/* used for ROM loading : can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003523void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003524 const uint8_t *buf, int len)
3525{
3526 int l;
3527 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003528 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003529 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003530
bellardd0ecd2a2006-04-23 17:14:48 +00003531 while (len > 0) {
3532 page = addr & TARGET_PAGE_MASK;
3533 l = (page + TARGET_PAGE_SIZE) - addr;
3534 if (l > len)
3535 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003536 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003537
Blue Swirlcc5bea62012-04-14 14:56:48 +00003538 if (!(memory_region_is_ram(section->mr) ||
3539 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00003540 /* do nothing */
3541 } else {
3542 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003543 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003544 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00003545 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003546 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003547 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003548 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003549 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003550 }
3551 len -= l;
3552 buf += l;
3553 addr += l;
3554 }
3555}
3556
aliguori6d16c2f2009-01-22 16:59:11 +00003557typedef struct {
3558 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003559 target_phys_addr_t addr;
3560 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003561} BounceBuffer;
3562
3563static BounceBuffer bounce;
3564
aliguoriba223c22009-01-22 16:59:16 +00003565typedef struct MapClient {
3566 void *opaque;
3567 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003568 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003569} MapClient;
3570
Blue Swirl72cf2d42009-09-12 07:36:22 +00003571static QLIST_HEAD(map_client_list, MapClient) map_client_list
3572 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003573
3574void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3575{
Anthony Liguori7267c092011-08-20 22:09:37 -05003576 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003577
3578 client->opaque = opaque;
3579 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003580 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003581 return client;
3582}
3583
3584void cpu_unregister_map_client(void *_client)
3585{
3586 MapClient *client = (MapClient *)_client;
3587
Blue Swirl72cf2d42009-09-12 07:36:22 +00003588 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003589 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003590}
3591
3592static void cpu_notify_map_clients(void)
3593{
3594 MapClient *client;
3595
Blue Swirl72cf2d42009-09-12 07:36:22 +00003596 while (!QLIST_EMPTY(&map_client_list)) {
3597 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003598 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003599 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003600 }
3601}
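
/* A minimal retry sketch for the map-client mechanism above: a device
 * whose cpu_physical_memory_map() call returned NULL registers a
 * callback and re-issues the mapping once the bounce buffer frees up.
 * 'retry_dma' and 'dev_state' are hypothetical stand-ins.  Illustrative
 * only; nothing calls these. */
static void retry_dma(void *opaque)
{
    /* re-issue the failed cpu_physical_memory_map() for 'opaque' here */
}

static void example_register_map_client(void *dev_state)
{
    void *client = cpu_register_map_client(dev_state, retry_dma);

    (void)client; /* keep it if the retry may need cancelling later */
}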
3602
aliguori6d16c2f2009-01-22 16:59:11 +00003603/* Map a physical memory region into a host virtual address.
3604 * May map a subset of the requested range, given by and returned in *plen.
3605 * May return NULL if resources needed to perform the mapping are exhausted.
3606 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003607 * Use cpu_register_map_client() to know when retrying the map operation is
3608 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003609 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003610void *cpu_physical_memory_map(target_phys_addr_t addr,
3611 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003612 int is_write)
3613{
Anthony Liguoric227f092009-10-01 16:12:16 -05003614 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003615 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003616 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003617 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003618 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003619 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003620 ram_addr_t rlen;
3621 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003622
3623 while (len > 0) {
3624 page = addr & TARGET_PAGE_MASK;
3625 l = (page + TARGET_PAGE_SIZE) - addr;
3626 if (l > len)
3627 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003628 section = phys_page_find(page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00003629
Avi Kivityf3705d52012-03-08 16:16:34 +02003630 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003631 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003632 break;
3633 }
3634 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3635 bounce.addr = addr;
3636 bounce.len = l;
3637 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02003638 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003639 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003640
3641 *plen = l;
3642 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00003643 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003644 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003645 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003646 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003647 }
aliguori6d16c2f2009-01-22 16:59:11 +00003648
3649 len -= l;
3650 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003651 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00003652 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003653 rlen = todo;
3654 ret = qemu_ram_ptr_length(raddr, &rlen);
3655 *plen = rlen;
3656 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003657}
3658
3659/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3660 * Will also mark the memory as dirty if is_write == 1. access_len gives
3661 * the amount of memory that was actually read or written by the caller.
3662 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003663void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3664 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003665{
3666 if (buffer != bounce.buffer) {
3667 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003668 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003669 while (access_len) {
3670 unsigned l;
3671 l = TARGET_PAGE_SIZE;
3672 if (l > access_len)
3673 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003674 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003675 addr1 += l;
3676 access_len -= l;
3677 }
3678 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003679 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003680 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003681 }
aliguori6d16c2f2009-01-22 16:59:11 +00003682 return;
3683 }
3684 if (is_write) {
3685 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3686 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003687 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003688 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00003689 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003690}
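
/* A minimal DMA-style sketch of the map/unmap pattern above; the memset
 * stands in for whatever the device would really do with the buffer.
 * Illustrative only; nothing calls it. */
static void example_dma_write(target_phys_addr_t addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *buf = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!buf) {
        /* bounce buffer busy: register a map client and retry later */
        return;
    }
    /* note: plen may be smaller than the requested len */
    memset(buf, 0, plen);
    cpu_physical_memory_unmap(buf, plen, 1, plen);
}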
bellardd0ecd2a2006-04-23 17:14:48 +00003691
bellard8df1cd02005-01-28 22:37:22 +00003692/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003693static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3694 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003695{
bellard8df1cd02005-01-28 22:37:22 +00003696 uint8_t *ptr;
3697 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003698 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003699
Avi Kivity06ef3522012-02-13 16:11:22 +02003700 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003701
Blue Swirlcc5bea62012-04-14 14:56:48 +00003702 if (!(memory_region_is_ram(section->mr) ||
3703 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00003704 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003705 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003706 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003707#if defined(TARGET_WORDS_BIGENDIAN)
3708 if (endian == DEVICE_LITTLE_ENDIAN) {
3709 val = bswap32(val);
3710 }
3711#else
3712 if (endian == DEVICE_BIG_ENDIAN) {
3713 val = bswap32(val);
3714 }
3715#endif
bellard8df1cd02005-01-28 22:37:22 +00003716 } else {
3717 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003718 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003719 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003720 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003721 switch (endian) {
3722 case DEVICE_LITTLE_ENDIAN:
3723 val = ldl_le_p(ptr);
3724 break;
3725 case DEVICE_BIG_ENDIAN:
3726 val = ldl_be_p(ptr);
3727 break;
3728 default:
3729 val = ldl_p(ptr);
3730 break;
3731 }
bellard8df1cd02005-01-28 22:37:22 +00003732 }
3733 return val;
3734}
3735
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003736uint32_t ldl_phys(target_phys_addr_t addr)
3737{
3738 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3739}
3740
3741uint32_t ldl_le_phys(target_phys_addr_t addr)
3742{
3743 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3744}
3745
3746uint32_t ldl_be_phys(target_phys_addr_t addr)
3747{
3748 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3749}
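
/* A minimal sketch of picking a load variant: a device whose register
 * window is little-endian uses the _le_ accessor so the value is
 * byte-swapped on big-endian targets; the register offset is
 * hypothetical.  Illustrative only; nothing calls it. */
static uint32_t example_read_le_reg(target_phys_addr_t base)
{
    return ldl_le_phys(base + 0x10); /* 32-bit LE register at +0x10 */
}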
3750
bellard84b7b8e2005-11-28 21:19:04 +00003751/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003752static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3753 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003754{
bellard84b7b8e2005-11-28 21:19:04 +00003755 uint8_t *ptr;
3756 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003757 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00003758
Avi Kivity06ef3522012-02-13 16:11:22 +02003759 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003760
Blue Swirlcc5bea62012-04-14 14:56:48 +00003761 if (!(memory_region_is_ram(section->mr) ||
3762 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00003763 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003764 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003765
3766 /* XXX This is broken when the device endianness differs from the CPU
3767 endianness; the "endian" argument should be honoured here too. */
bellard84b7b8e2005-11-28 21:19:04 +00003768#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02003769 val = io_mem_read(section->mr, addr, 4) << 32;
3770 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00003771#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02003772 val = io_mem_read(section->mr, addr, 4);
3773 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00003774#endif
3775 } else {
3776 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003777 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003778 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003779 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003780 switch (endian) {
3781 case DEVICE_LITTLE_ENDIAN:
3782 val = ldq_le_p(ptr);
3783 break;
3784 case DEVICE_BIG_ENDIAN:
3785 val = ldq_be_p(ptr);
3786 break;
3787 default:
3788 val = ldq_p(ptr);
3789 break;
3790 }
bellard84b7b8e2005-11-28 21:19:04 +00003791 }
3792 return val;
3793}
3794
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003795uint64_t ldq_phys(target_phys_addr_t addr)
3796{
3797 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3798}
3799
3800uint64_t ldq_le_phys(target_phys_addr_t addr)
3801{
3802 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3803}
3804
3805uint64_t ldq_be_phys(target_phys_addr_t addr)
3806{
3807 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3808}
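/* Note for MMIO users: in the I/O case above a 64-bit load is issued as
 * two 4-byte io_mem_read() calls, so it is not atomic from the device's
 * point of view, and (per the XXX above) the requested endianness is not
 * yet honoured for I/O. RAM-backed sketch, with hypothetical names:
 *
 *     uint64_t desc_pa = ldq_le_phys(ring_pa + 8);   // LE ring entry
 */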
3809
bellardaab33092005-10-30 20:48:42 +00003810/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003811uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003812{
3813 uint8_t val;
3814 cpu_physical_memory_read(addr, &val, 1);
3815 return val;
3816}
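/* Byte loads need no endian variants and no alignment; ldub_phys simply
 * goes through the generic cpu_physical_memory_read() path, e.g.:
 *
 *     uint32_t tag = ldub_phys(buf_pa);   // buf_pa is a hypothetical address
 */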
3817
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003818/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003819static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3820 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003821{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003822 uint8_t *ptr;
3823 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003824 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003825
Avi Kivity06ef3522012-02-13 16:11:22 +02003826 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003827
Blue Swirlcc5bea62012-04-14 14:56:48 +00003828 if (!(memory_region_is_ram(section->mr) ||
3829 memory_region_is_romd(section->mr))) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003830 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003831 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003832 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003833#if defined(TARGET_WORDS_BIGENDIAN)
3834 if (endian == DEVICE_LITTLE_ENDIAN) {
3835 val = bswap16(val);
3836 }
3837#else
3838 if (endian == DEVICE_BIG_ENDIAN) {
3839 val = bswap16(val);
3840 }
3841#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003842 } else {
3843 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003844 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003845 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003846 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003847 switch (endian) {
3848 case DEVICE_LITTLE_ENDIAN:
3849 val = lduw_le_p(ptr);
3850 break;
3851 case DEVICE_BIG_ENDIAN:
3852 val = lduw_be_p(ptr);
3853 break;
3854 default:
3855 val = lduw_p(ptr);
3856 break;
3857 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003858 }
3859 return val;
bellardaab33092005-10-30 20:48:42 +00003860}
3861
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003862uint32_t lduw_phys(target_phys_addr_t addr)
3863{
3864 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3865}
3866
3867uint32_t lduw_le_phys(target_phys_addr_t addr)
3868{
3869 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3870}
3871
3872uint32_t lduw_be_phys(target_phys_addr_t addr)
3873{
3874 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
3875}
3876
bellard8df1cd02005-01-28 22:37:22 +00003877/* warning: addr must be aligned. The RAM page is not marked as dirty
3878 and the code inside is not invalidated. This is useful when the dirty
3879 bits are used to track modified PTEs */
Anthony Liguoric227f092009-10-01 16:12:16 -05003880void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003881{
bellard8df1cd02005-01-28 22:37:22 +00003882 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003883 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003884
Avi Kivity06ef3522012-02-13 16:11:22 +02003885 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003886
Avi Kivityf3705d52012-03-08 16:16:34 +02003887 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003888 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003889 if (memory_region_is_ram(section->mr)) {
3890 section = &phys_sections[phys_section_rom];
3891 }
3892 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00003893 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003894 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003895 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003896 + memory_region_section_addr(section, addr);
pbrook5579c7f2009-04-11 14:47:08 +00003897 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003898 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003899
3900 if (unlikely(in_migration)) {
3901 if (!cpu_physical_memory_is_dirty(addr1)) {
3902 /* invalidate code */
3903 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3904 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003905 cpu_physical_memory_set_dirty_flags(
3906 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00003907 }
3908 }
bellard8df1cd02005-01-28 22:37:22 +00003909 }
3910}
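/* Illustrative use (hypothetical names, mirroring how softmmu page-table
 * walkers typically behave): an MMU helper that sets the accessed/dirty
 * bits in a guest PTE can use the _notdirty variant so the update itself
 * neither flags the page as modified nor invalidates translated code
 * covering it:
 *
 *     uint32_t pte = ldl_phys(pte_pa);
 *     stl_phys_notdirty(pte_pa, pte | PG_ACCESSED_MASK);   // names assumed
 */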
3911
Anthony Liguoric227f092009-10-01 16:12:16 -05003912void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00003913{
j_mayerbc98a7e2007-04-04 07:55:12 +00003914 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003915 MemoryRegionSection *section;
j_mayerbc98a7e2007-04-04 07:55:12 +00003916
Avi Kivity06ef3522012-02-13 16:11:22 +02003917 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003918
Avi Kivityf3705d52012-03-08 16:16:34 +02003919 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003920 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003921 if (memory_region_is_ram(section->mr)) {
3922 section = &phys_sections[phys_section_rom];
3923 }
j_mayerbc98a7e2007-04-04 07:55:12 +00003924#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02003925 io_mem_write(section->mr, addr, val >> 32, 4);
3926 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00003927#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02003928 io_mem_write(section->mr, addr, (uint32_t)val, 4);
3929 io_mem_write(section->mr, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00003930#endif
3931 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003932 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003933 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003934 + memory_region_section_addr(section, addr));
j_mayerbc98a7e2007-04-04 07:55:12 +00003935 stq_p(ptr, val);
3936 }
3937}
3938
bellard8df1cd02005-01-28 22:37:22 +00003939/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003940static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
3941 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003942{
bellard8df1cd02005-01-28 22:37:22 +00003943 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003944 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003945
Avi Kivity06ef3522012-02-13 16:11:22 +02003946 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003947
Avi Kivityf3705d52012-03-08 16:16:34 +02003948 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003949 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003950 if (memory_region_is_ram(section->mr)) {
3951 section = &phys_sections[phys_section_rom];
3952 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003953#if defined(TARGET_WORDS_BIGENDIAN)
3954 if (endian == DEVICE_LITTLE_ENDIAN) {
3955 val = bswap32(val);
3956 }
3957#else
3958 if (endian == DEVICE_BIG_ENDIAN) {
3959 val = bswap32(val);
3960 }
3961#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02003962 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00003963 } else {
3964 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003965 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003966 + memory_region_section_addr(section, addr);
bellard8df1cd02005-01-28 22:37:22 +00003967 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003968 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003969 switch (endian) {
3970 case DEVICE_LITTLE_ENDIAN:
3971 stl_le_p(ptr, val);
3972 break;
3973 case DEVICE_BIG_ENDIAN:
3974 stl_be_p(ptr, val);
3975 break;
3976 default:
3977 stl_p(ptr, val);
3978 break;
3979 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003980 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00003981 }
3982}
3983
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003984void stl_phys(target_phys_addr_t addr, uint32_t val)
3985{
3986 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
3987}
3988
3989void stl_le_phys(target_phys_addr_t addr, uint32_t val)
3990{
3991 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
3992}
3993
3994void stl_be_phys(target_phys_addr_t addr, uint32_t val)
3995{
3996 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
3997}
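/* Unlike the _notdirty variant above, these stores call
 * invalidate_and_set_dirty(), so translated code over the written RAM is
 * discarded and migration sees the page as dirty. Sketch with a
 * hypothetical address:
 *
 *     stl_le_phys(status_pa, 1);   // e.g. post a LE completion status
 */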
3998
bellardaab33092005-10-30 20:48:42 +00003999/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004000void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004001{
4002 uint8_t v = val;
4003 cpu_physical_memory_write(addr, &v, 1);
4004}
4005
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004006/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004007static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4008 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00004009{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004010 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02004011 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004012
Avi Kivity06ef3522012-02-13 16:11:22 +02004013 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004014
Avi Kivityf3705d52012-03-08 16:16:34 +02004015 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00004016 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02004017 if (memory_region_is_ram(section->mr)) {
4018 section = &phys_sections[phys_section_rom];
4019 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004020#if defined(TARGET_WORDS_BIGENDIAN)
4021 if (endian == DEVICE_LITTLE_ENDIAN) {
4022 val = bswap16(val);
4023 }
4024#else
4025 if (endian == DEVICE_BIG_ENDIAN) {
4026 val = bswap16(val);
4027 }
4028#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02004029 io_mem_write(section->mr, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004030 } else {
4031 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02004032 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00004033 + memory_region_section_addr(section, addr);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004034 /* RAM case */
4035 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004036 switch (endian) {
4037 case DEVICE_LITTLE_ENDIAN:
4038 stw_le_p(ptr, val);
4039 break;
4040 case DEVICE_BIG_ENDIAN:
4041 stw_be_p(ptr, val);
4042 break;
4043 default:
4044 stw_p(ptr, val);
4045 break;
4046 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00004047 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004048 }
bellardaab33092005-10-30 20:48:42 +00004049}
4050
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004051void stw_phys(target_phys_addr_t addr, uint32_t val)
4052{
4053 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4054}
4055
4056void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4057{
4058 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4059}
4060
4061void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4062{
4063 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4064}
4065
bellardaab33092005-10-30 20:48:42 +00004066/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004067void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004068{
4069 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004070 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004071}
4072
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004073void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4074{
4075 val = cpu_to_le64(val);
4076 cpu_physical_memory_write(addr, &val, 8);
4077}
4078
4079void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4080{
4081 val = cpu_to_be64(val);
4082 cpu_physical_memory_write(addr, &val, 8);
4083}
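/* Note: the 64-bit stores above byte-swap in place and then go through
 * cpu_physical_memory_write(), i.e. the generic byte-granular path, so
 * unlike stl/stw they carry no alignment warning. Sketch (hypothetical
 * names):
 *
 *     stq_le_phys(entry_pa, dma_addr);   // 64-bit LE table entry
 */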
4084
aliguori5e2972f2009-03-28 17:51:36 +00004085/* virtual memory access for debug (includes writing to ROM) */
Andreas Färber9349b4f2012-03-14 01:38:32 +01004086int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004087 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004088{
4089 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004090 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004091 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004092
4093 while (len > 0) {
4094 page = addr & TARGET_PAGE_MASK;
4095 phys_addr = cpu_get_phys_page_debug(env, page);
4096 /* if no physical page mapped, return an error */
4097 if (phys_addr == -1)
4098 return -1;
4099 l = (page + TARGET_PAGE_SIZE) - addr;
4100 if (l > len)
4101 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004102 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004103 if (is_write)
4104 cpu_physical_memory_write_rom(phys_addr, buf, l);
4105 else
aliguori5e2972f2009-03-28 17:51:36 +00004106 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004107 len -= l;
4108 buf += l;
4109 addr += l;
4110 }
4111 return 0;
4112}
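/* Illustrative caller (hypothetical buffer and address): the gdbstub
 * reads guest *virtual* memory this way, letting
 * cpu_get_phys_page_debug() do the page-table walk for each page:
 *
 *     uint8_t buf[16];
 *     if (cpu_memory_rw_debug(env, guest_va, buf, sizeof(buf), 0) < 0) {
 *         // unmapped page: report an error to the debugger
 *     }
 */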
Paul Brooka68fe892010-03-01 00:08:59 +00004113#endif
bellard13eb76e2004-01-24 15:23:36 +00004114
pbrook2e70f6e2008-06-29 01:03:05 +00004115/* In deterministic execution mode, instructions performing device I/O
4116 must be the last instruction in the TB */
Blue Swirl20503962012-04-09 14:20:20 +00004117void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
pbrook2e70f6e2008-06-29 01:03:05 +00004118{
4119 TranslationBlock *tb;
4120 uint32_t n, cflags;
4121 target_ulong pc, cs_base;
4122 uint64_t flags;
4123
Blue Swirl20503962012-04-09 14:20:20 +00004124 tb = tb_find_pc(retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004125 if (!tb) {
4126 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
Blue Swirl20503962012-04-09 14:20:20 +00004127 (void *)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004128 }
4129 n = env->icount_decr.u16.low + tb->icount;
Blue Swirl20503962012-04-09 14:20:20 +00004130 cpu_restore_state(tb, env, retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004131 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004132 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004133 n = n - env->icount_decr.u16.low;
4134 /* Generate a new TB ending on the I/O insn. */
4135 n++;
4136 /* On MIPS and SH, delay slot instructions can only be restarted if
4137 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004138 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004139 branch. */
4140#if defined(TARGET_MIPS)
4141 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4142 env->active_tc.PC -= 4;
4143 env->icount_decr.u16.low++;
4144 env->hflags &= ~MIPS_HFLAG_BMASK;
4145 }
4146#elif defined(TARGET_SH4)
4147 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4148 && n > 1) {
4149 env->pc -= 2;
4150 env->icount_decr.u16.low++;
4151 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4152 }
4153#endif
4154 /* This should never happen. */
4155 if (n > CF_COUNT_MASK)
4156 cpu_abort(env, "TB too big during recompile");
4157
4158 cflags = n | CF_LAST_IO;
4159 pc = tb->pc;
4160 cs_base = tb->cs_base;
4161 flags = tb->flags;
4162 tb_phys_invalidate(tb, -1);
4163 /* FIXME: In theory this could raise an exception. In practice
4164 we have already translated the block once so it's probably ok. */
4165 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004166 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004167 the first in the TB) then we end up generating a whole new TB and
4168 repeating the fault, which is horribly inefficient.
4169 Better would be to execute just this insn uncached, or generate a
4170 second new TB. */
4171 cpu_resume_from_signal(env, NULL);
4172}
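/* Summary: in icount (deterministic) mode the instruction counter is only
 * exact at TB boundaries, so when a memory access reaches a device in the
 * middle of a TB the access is abandoned, the TB is regenerated here with
 * CF_LAST_IO so that the I/O instruction terminates it, and execution
 * resumes from the start of the faulting instruction. */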
4173
Paul Brookb3755a92010-03-12 16:54:58 +00004174#if !defined(CONFIG_USER_ONLY)
4175
Stefan Weil055403b2010-10-22 23:03:32 +02004176void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004177{
4178 int i, target_code_size, max_target_code_size;
4179 int direct_jmp_count, direct_jmp2_count, cross_page;
4180 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004181
bellarde3db7222005-01-26 22:00:47 +00004182 target_code_size = 0;
4183 max_target_code_size = 0;
4184 cross_page = 0;
4185 direct_jmp_count = 0;
4186 direct_jmp2_count = 0;
4187 for (i = 0; i < nb_tbs; i++) {
4188 tb = &tbs[i];
4189 target_code_size += tb->size;
4190 if (tb->size > max_target_code_size)
4191 max_target_code_size = tb->size;
4192 if (tb->page_addr[1] != -1)
4193 cross_page++;
4194 if (tb->tb_next_offset[0] != 0xffff) {
4195 direct_jmp_count++;
4196 if (tb->tb_next_offset[1] != 0xffff) {
4197 direct_jmp2_count++;
4198 }
4199 }
4200 }
4201 /* XXX: avoid using doubles? */
bellard57fec1f2008-02-01 10:50:11 +00004202 cpu_fprintf(f, "Translation buffer state:\n");
Richard Hendersonf1bc0bc2012-10-16 17:30:10 +10004203 cpu_fprintf(f, "gen code size %td/%zd\n",
bellard26a5f132008-05-28 12:30:31 +00004204 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4205 cpu_fprintf(f, "TB count %d/%d\n",
4206 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004207 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004208 nb_tbs ? target_code_size / nb_tbs : 0,
4209 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004210 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004211 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4212 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004213 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4214 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004215 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4216 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004217 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004218 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4219 direct_jmp2_count,
4220 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004221 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004222 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4223 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4224 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004225 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004226}
4227
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00004228/*
4229 * A helper function for the _utterly broken_ virtio device model to find out
4230 * whether it's running on a big-endian machine. Don't do this at home, kids!
4231 */
4232bool virtio_is_big_endian(void);
4233bool virtio_is_big_endian(void)
4234{
4235#if defined(TARGET_WORDS_BIGENDIAN)
4236 return true;
4237#else
4238 return false;
4239#endif
4240}
4241
bellard61382a52003-10-27 21:22:23 +00004242#endif
Wen Congyang76f35532012-05-07 12:04:18 +08004243
4244#ifndef CONFIG_USER_ONLY
4245bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
4246{
4247 MemoryRegionSection *section;
4248
4249 section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);
4250
4251 return !(memory_region_is_ram(section->mr) ||
4252 memory_region_is_romd(section->mr));
4253}
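/* Illustrative use (hypothetical names): memory dumping code can test a
 * guest physical address before touching it, so MMIO regions are skipped:
 *
 *     if (!cpu_physical_memory_is_io(paddr)) {
 *         cpu_physical_memory_read(paddr, buf, len);
 *     }
 */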
4254#endif